blob: 2466bb44a23c510b55f7955affad73b74e38c34a [file] [log] [blame]
Thomas Gleixner7336d0e2019-05-31 01:09:56 -07001// SPDX-License-Identifier: GPL-2.0-only
David Teiglandb3b94fa2006-01-16 16:50:04 +00002/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersonfe6c9912008-01-28 11:13:02 -06004 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00005 */
6
Joe Perchesd77d1b52014-03-06 12:10:45 -08007#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
David Teiglandb3b94fa2006-01-16 16:50:04 +00009#include <linux/slab.h>
10#include <linux/spinlock.h>
11#include <linux/completion.h>
12#include <linux/buffer_head.h>
Steven Whitehousef42faf42006-01-30 18:34:10 +000013#include <linux/fs.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050014#include <linux/gfs2_ondisk.h>
Bob Peterson1f466a42008-03-10 18:17:47 -050015#include <linux/prefetch.h>
Steven Whitehousef15ab562009-02-09 09:25:01 +000016#include <linux/blkdev.h>
Bob Peterson7c9ca622011-08-31 09:53:19 +010017#include <linux/rbtree.h>
Steven Whitehouse9dbe9612012-10-31 10:37:10 +000018#include <linux/random.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000019
20#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050021#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000022#include "glock.h"
23#include "glops.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000024#include "lops.h"
25#include "meta_io.h"
26#include "quota.h"
27#include "rgrp.h"
28#include "super.h"
29#include "trans.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050030#include "util.h"
Benjamin Marzinski172e0452007-03-23 14:51:56 -060031#include "log.h"
Steven Whitehousec8cdf472007-06-08 10:05:33 +010032#include "inode.h"
Steven Whitehouse63997772009-06-12 08:49:20 +010033#include "trace_gfs2.h"
Andrew Price850d2d92017-12-12 11:42:30 -060034#include "dir.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000035
#define BFITNOENT ((u32)~0)	/* gfs2_bitfit() result: no matching bit-pair found */
#define NO_BLOCK ((u64)~0)	/* sentinel meaning "no block number" */

/*
 * Word-sized bit patterns used when scanning bitmaps a whole long at a
 * time.  Sized to the native word so the same code works on 32- and
 * 64-bit machines.
 */
#if BITS_PER_LONG == 32
#define LBITMASK (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif
48
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040049/*
50 * These routines are used by the resource group routines (rgrp.c)
51 * to keep track of block allocation. Each block is represented by two
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -040052 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
53 *
54 * 0 = Free
55 * 1 = Used (not metadata)
56 * 2 = Unlinked (still in use) inode
57 * 3 = Used (metadata)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040058 */
59
/* A contiguous run of blocks: a starting position plus a length. */
struct gfs2_extent {
	struct gfs2_rbm rbm;	/* location of the first block of the extent */
	u32 len;		/* length of the extent, in blocks */
};
64
/*
 * Lookup table of permitted bitmap state transitions, indexed as
 * valid_change[new_state * 4 + cur_state]; a non-zero entry means the
 * change from cur_state to new_state is legal (enforced in gfs2_setbit()).
 */
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

/* Forward declaration; the definition appears later in this file. */
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +010075
76
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 * Writes the two-bit allocation state of one block into the bitmap
 * buffer (and, optionally, into the clone bitmap).  An illegal state
 * transition (per valid_change[]) is logged and marks the rgrp
 * inconsistent instead of corrupting the bitmap.
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_bytes;
	/* Bit position of this block's two-bit entry within its byte */
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;

		fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
			(unsigned long long)bi->bi_bh->b_blocknr);
		fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
			bi->bi_offset, bi->bi_bytes,
			(unsigned long long)gfs2_rbm_to_block(rbm));
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	/* XORing with (old ^ new) rewrites only this entry's two bits */
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
123
124/**
125 * gfs2_testbit - test a bit in the bitmaps
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100126 * @rbm: The bit to test
Bob Petersondffe12a2018-08-07 10:07:00 -0500127 * @use_clone: If true, test the clone bitmap, not the official bitmap.
128 *
129 * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
130 * not the "real" bitmaps, to avoid allocating recently freed blocks.
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400131 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100132 * Returns: The two bit block state of the requested bit
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400133 */
134
Bob Petersondffe12a2018-08-07 10:07:00 -0500135static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400136{
Bob Petersone579ed42013-09-17 13:12:15 -0400137 struct gfs2_bitmap *bi = rbm_bi(rbm);
Bob Petersondffe12a2018-08-07 10:07:00 -0500138 const u8 *buffer;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100139 const u8 *byte;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400140 unsigned int bit;
141
Bob Petersondffe12a2018-08-07 10:07:00 -0500142 if (use_clone && bi->bi_clone)
143 buffer = bi->bi_clone;
144 else
145 buffer = bi->bi_bh->b_data;
146 buffer += bi->bi_offset;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100147 byte = buffer + (rbm->offset / GFS2_NBBY);
148 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400149
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100150 return (*byte >> bit) & GFS2_BIT_MASK;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400151}
152
/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for, this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then and it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	/* Bitwise complement of each two-bit state, replicated across 64 bits */
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];	/* matches become 0b11 */
	tmp &= (tmp >> 1);	/* both bits of an entry must have matched */
	tmp &= mask;		/* keep only even bit positions within range */
	return tmp;
}
185
186/**
Bob Peterson8e2e0042012-07-19 08:12:40 -0400187 * rs_cmp - multi-block reservation range compare
188 * @blk: absolute file system block number of the new reservation
189 * @len: number of blocks in the new reservation
190 * @rs: existing reservation to compare against
191 *
192 * returns: 1 if the block range is beyond the reach of the reservation
193 * -1 if the block range is before the start of the reservation
194 * 0 if the block range overlaps with the reservation
195 */
196static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
197{
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100198 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400199
200 if (blk >= startblk + rs->rs_free)
201 return 1;
202 if (blk + len - 1 < startblk)
203 return -1;
204 return 0;
205}
206
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	/* Starting bit within the first 64-bit word (2 bits per block) */
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	/* Scan one whole 64-bit word (32 blocks) per iteration */
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
257
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code (-E2BIG if @block is outside
 *          the resource group)
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	if (!rgrp_contains_block(rbm->rgd, block))
		return -E2BIG;
	rbm->bii = 0;
	rbm->offset = block - rbm->rgd->rd_data0;
	/* Check if the block is within the first block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}
288
289/**
Bob Peterson149ed7f2013-09-17 13:14:35 -0400290 * gfs2_rbm_incr - increment an rbm structure
291 * @rbm: The rbm with rgd already set correctly
292 *
293 * This function takes an existing rbm structure and increments it to the next
294 * viable block offset.
295 *
296 * Returns: If incrementing the offset would cause the rbm to go past the
297 * end of the rgrp, true is returned, otherwise false.
298 *
299 */
300
301static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
302{
303 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
304 rbm->offset++;
305 return false;
306 }
307 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
308 return true;
309
310 rbm->offset = 0;
311 rbm->bii++;
312 return false;
313}
314
315/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100316 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
317 * @rbm: Position to search (value/result)
318 * @n_unaligned: Number of unaligned blocks to check
319 * @len: Decremented for each block found (terminate on zero)
320 *
321 * Returns: true if a non-free block is encountered
322 */
323
324static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
325{
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100326 u32 n;
327 u8 res;
328
329 for (n = 0; n < n_unaligned; n++) {
Bob Petersondffe12a2018-08-07 10:07:00 -0500330 res = gfs2_testbit(rbm, true);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100331 if (res != GFS2_BLKST_FREE)
332 return true;
333 (*len)--;
334 if (*len == 0)
335 return true;
Bob Peterson149ed7f2013-09-17 13:14:35 -0400336 if (gfs2_rbm_incr(rbm))
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100337 return true;
338 }
339
340 return false;
341}
342
/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;	/* local copy; caller's rbm untouched */
	u32 n_unaligned = rbm.offset & 3;	/* blocks before byte alignment */
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	/* Step block-by-block until rbm.offset is byte (4-block) aligned */
	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;	/* prefer clone when present */
		start += bi->bi_offset;
		end = start + bi->bi_bytes;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		/* A zero byte == 4 free blocks; find the first non-zero byte */
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			/* ran off the end of the rgrp: nothing left to scan */
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			/* hit a non-zero byte: at most 3 more free blocks */
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;	/* number of blocks consumed == extent length */
}
408
409/**
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400410 * gfs2_bitcount - count the number of bits in a certain state
Bob Peterson886b1412012-04-11 13:03:52 -0400411 * @rgd: the resource group descriptor
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400412 * @buffer: the buffer that holds the bitmaps
413 * @buflen: the length (in bytes) of the buffer
414 * @state: the state of the block we're looking for
415 *
416 * Returns: The number of bits
417 */
418
Steven Whitehouse110acf32008-01-29 13:30:20 +0000419static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
420 unsigned int buflen, u8 state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400421{
Steven Whitehouse110acf32008-01-29 13:30:20 +0000422 const u8 *byte = buffer;
423 const u8 *end = buffer + buflen;
424 const u8 state1 = state << 2;
425 const u8 state2 = state << 4;
426 const u8 state3 = state << 6;
Steven Whitehousecd915492006-09-04 12:49:07 -0400427 u32 count = 0;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400428
429 for (; byte < end; byte++) {
430 if (((*byte) & 0x03) == state)
431 count++;
432 if (((*byte) & 0x0C) == state1)
433 count++;
434 if (((*byte) & 0x30) == state2)
435 count++;
436 if (((*byte) & 0xC0) == state3)
437 count++;
438 }
439
440 return count;
441}
442
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 * Tallies the blocks in each of the four bitmap states across all of the
 * rgrp's bitmaps and compares the totals against the counts recorded in
 * the rgrp header.  Any mismatch is reported via gfs2_consist_rgrpd().
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_bytes, x);
	}

	/* Free-block count must match the header's rd_free */
	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	/* Used (non-inode) blocks == total - free - dinodes */
	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	/* Unlinked + used-metadata blocks must equal the dinode count */
	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}
491
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * The @exact argument should be set to true by most callers. The exception
 * is when we need to match blocks which are not represented by the rgrp
 * bitmap, but which are part of the rgrp (i.e. padding blocks) which are
 * there for alignment purposes. Another way of looking at it is that @exact
 * matches only valid data/metadata blocks, but with @exact false, it will
 * match any block within the extent of the rgrp.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	/* Binary search of the rgrp rb-tree, keyed by block range */
	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			/* Descent stopped at @cur: the candidate rgrp */
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				/* Reject padding blocks outside the bitmap range */
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
538
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 *
 * NOTE(review): rb_first() returns NULL on an empty tree, which rb_entry()
 * would turn into a bogus pointer; this appears to assume the rindex tree
 * is never empty here — confirm against callers.
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
558
/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Wraps around to the first rgrp after the last one; returns NULL only
 * when @rgd is the sole rgrp in the tree (wrap-around reached itself).
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);	/* wrap around */

	if (unlikely(&rgd->rd_node == n)) {
		/* Wrapped back to ourselves: only one rgrp exists */
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
584
Abhi Das00a158b2014-09-18 21:40:28 -0500585void check_and_update_goal(struct gfs2_inode *ip)
586{
587 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
588 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
589 ip->i_goal = ip->i_no_addr;
590}
591
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100592void gfs2_free_clones(struct gfs2_rgrpd *rgd)
593{
594 int x;
595
596 for (x = 0; x < rgd->rd_length; x++) {
597 struct gfs2_bitmap *bi = rgd->rd_bits + x;
598 kfree(bi->bi_clone);
599 bi->bi_clone = NULL;
600 }
601}
602
/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                   plus a quota allocations data structure, if necessary
 * @ip: the inode for this reservation
 *
 * Returns: whatever gfs2_qa_alloc() returns (0 on success, presumably a
 *          negative errno on failure — confirm against its definition).
 */
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
	return gfs2_qa_alloc(ip);
}
612
/**
 * dump_rs - print details of a single block reservation (debug aid)
 * @seq: the seq_file to print to
 * @rs: the reservation to describe
 * @fs_id_buf: filesystem identifier string prepended to the output
 *
 * Prints the owning inode's address, the reservation's starting block,
 * its bitmap offset and its free-block count.
 */
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
		    const char *fs_id_buf)
{
	/* The reservation is embedded in its inode; recover the inode */
	struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);

	gfs2_print_dbg(seq, "%s B: n:%llu s:%llu b:%u f:%u\n", fs_id_buf,
		       (unsigned long long)ip->i_no_addr,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}
623
/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 * Caller must hold the rgrp's rd_rsspin (see gfs2_rs_deltree()) —
 * NOTE(review): inferred from the locked callers visible in this file;
 * confirm no other call sites exist.
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
				 rs->rs_free - 1;
		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
		struct gfs2_bitmap *start, *last;

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		if (gfs2_rbm_from_block(&last_rbm, last_block))
			return;
		/* Clear GBF_FULL on every bitmap the reservation spanned,
		   so future searches will reconsider them */
		start = rbm_bi(&rs->rs_rbm);
		last = rbm_bi(&last_rbm);
		do
			clear_bit(GBF_FULL, &start->bi_flags);
		while (start++ != last);
	}
}
665
666/**
667 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
668 * @rs: The reservation to remove
669 *
670 */
Bob Peterson20095212013-03-13 10:26:38 -0400671void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400672{
673 struct gfs2_rgrpd *rgd;
674
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100675 rgd = rs->rs_rbm.rgd;
676 if (rgd) {
677 spin_lock(&rgd->rd_rsspin);
Bob Peterson20095212013-03-13 10:26:38 -0400678 __rs_deltree(rs);
Bob Peterson44f52122016-07-06 10:36:43 -0500679 BUG_ON(rs->rs_free);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100680 spin_unlock(&rgd->rd_rsspin);
681 }
Bob Peterson8e2e0042012-07-19 08:12:40 -0400682}
683
684/**
Bob Petersonb54e9a02015-10-26 10:40:28 -0500685 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
Bob Peterson0a305e42012-06-06 11:17:59 +0100686 * @ip: The inode for this reservation
Steven Whitehouseaf5c2692013-09-27 12:49:33 +0100687 * @wcount: The inode's write count, or NULL
Bob Peterson0a305e42012-06-06 11:17:59 +0100688 *
689 */
Bob Petersonb54e9a02015-10-26 10:40:28 -0500690void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
Bob Peterson0a305e42012-06-06 11:17:59 +0100691{
692 down_write(&ip->i_rw_mutex);
Bob Peterson44f52122016-07-06 10:36:43 -0500693 if ((wcount == NULL) || (atomic_read(wcount) <= 1))
Bob Petersona097dc7e2015-07-16 08:28:04 -0500694 gfs2_rs_deltree(&ip->i_res);
Bob Peterson0a305e42012-06-06 11:17:59 +0100695 up_write(&ip->i_rw_mutex);
Bob Petersona097dc7e2015-07-16 08:28:04 -0500696 gfs2_qa_delete(ip, wcount);
Bob Peterson0a305e42012-06-06 11:17:59 +0100697}
698
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	/* __rs_deltree() unlinks each node, so repeatedly taking rb_first()
	   drains the whole tree */
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}
719
/**
 * gfs2_clear_rgrpd - tear down all resource group descriptors
 * @sdp: the filesystem superblock
 *
 * Empties sd_rindex_tree, releasing for each rgrp: its glock reference,
 * its cached bitmap buffers, its clone bitmaps, any outstanding block
 * reservations, and finally the descriptor itself.
 */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			/* Detach the rgrp from its glock and drop the cached
			   bitmaps before releasing our glock reference */
			glock_clear_object(gl, rgd);
			gfs2_rgrp_brelse(rgd);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		rgd->rd_bits = NULL;
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}
745
/* Dump the rindex fields cached in an rgrp descriptor to the kernel log;
 * used when reporting filesystem consistency errors. */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
	fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
	fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
756
/**
 * gfs2_compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * An rgrp's bitmap spans rd_length blocks: block 0 holds a struct gfs2_rgrp
 * header followed by bitmap bytes, and each subsequent block holds a
 * struct gfs2_meta_header followed by more bitmap bytes, until the
 * rd_bitbytes total (taken from the rindex) is consumed.
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	/* An rindex entry with zero length is corrupt */
	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_bytes = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_bytes = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_bytes = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_bytes = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	/* All of rd_bitbytes must have been distributed over the blocks */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/* The last bitmap must end exactly at rd_data blocks; anything else
	   means the rindex entry is inconsistent with itself */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_bytes, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
837
838/**
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500839 * gfs2_ri_total - Total up the file system space, according to the rindex.
Bob Peterson886b1412012-04-11 13:03:52 -0400840 * @sdp: the filesystem
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500841 *
842 */
843u64 gfs2_ri_total(struct gfs2_sbd *sdp)
844{
845 u64 total_data = 0;
846 struct inode *inode = sdp->sd_rindex;
847 struct gfs2_inode *ip = GFS2_I(inode);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500848 char buf[sizeof(struct gfs2_rindex)];
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500849 int error, rgrps;
850
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500851 for (rgrps = 0;; rgrps++) {
852 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
853
Bob Petersonbcd72782010-12-07 13:58:56 -0500854 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500855 break;
Andrew Price43066292012-04-16 16:40:55 +0100856 error = gfs2_internal_read(ip, buf, &pos,
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500857 sizeof(struct gfs2_rindex));
858 if (error != sizeof(struct gfs2_rindex))
859 break;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100860 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500861 }
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500862 return total_data;
863}
864
Bob Peterson6aad1c32012-03-05 09:20:59 -0500865static int rgd_insert(struct gfs2_rgrpd *rgd)
Bob Peterson7c9ca622011-08-31 09:53:19 +0100866{
867 struct gfs2_sbd *sdp = rgd->rd_sbd;
868 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
869
870 /* Figure out where to put new node */
871 while (*newn) {
872 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
873 rd_node);
874
875 parent = *newn;
876 if (rgd->rd_addr < cur->rd_addr)
877 newn = &((*newn)->rb_left);
878 else if (rgd->rd_addr > cur->rd_addr)
879 newn = &((*newn)->rb_right);
880 else
Bob Peterson6aad1c32012-03-05 09:20:59 -0500881 return -EEXIST;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100882 }
883
884 rb_link_node(&rgd->rd_node, parent, newn);
885 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
Bob Peterson6aad1c32012-03-05 09:20:59 -0500886 sdp->sd_rgrps++;
887 return 0;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100888}
889
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Reads the next rindex entry (indexed by sd_rgrps), allocates and fills
 * an rgrp descriptor for it, creates its glock, and inserts it into the
 * rindex rbtree.
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	/* Past the end of the rindex file: report EOF */
	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	/* A short read of 0 bytes is EOF; anything else short is an error */
	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	/* Convert the on-disk (big-endian) entry into the descriptor */
	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	/* The rgrp's lock value block lives in the glock's lksb */
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error) {
		glock_set_object(rgd->rd_gl, rgd);
		/* Page-aligned address range covered by this rgrp's glock */
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
		return 0;
	}

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	rgd->rd_bits = NULL;
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}
961
/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;
	/* NOTE(review): rgd is assumed non-NULL from here on; if the rindex
	   contained no usable rgrps this would dereference NULL -- verify
	   callers guarantee at least one rgrp before calling. */

	do {
		/* Mark this rgrp, then advance sd_journals entries so each
		   node prefers a disjoint stripe of the rgrp list */
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}
991
992/**
Robert Peterson6c532672007-05-10 16:54:38 -0500993 * gfs2_ri_update - Pull in a new resource index from the disk
994 * @ip: pointer to the rindex inode
995 *
David Teiglandb3b94fa2006-01-16 16:50:04 +0000996 * Returns: 0 on successful update, error code otherwise
997 */
998
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100999static int gfs2_ri_update(struct gfs2_inode *ip)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001000{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001001 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001002 int error;
1003
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001004 do {
Andrew Price43066292012-04-16 16:40:55 +01001005 error = read_rindex_entry(ip);
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001006 } while (error == 0);
1007
1008 if (error < 0)
1009 return error;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001010
Bob Peterson0e27c182014-10-29 08:02:28 -05001011 set_rgrp_preferences(sdp);
1012
Bob Petersoncf45b752008-01-31 10:31:39 -06001013 sdp->sd_rindex_uptodate = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001014 return 0;
Robert Peterson6c532672007-05-10 16:54:38 -05001015}
David Teiglandb3b94fa2006-01-16 16:50:04 +00001016
/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		/* Take the rindex glock shared unless this task already
		   holds it; remember whether we need to drop it */
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		/* Re-check under the glock: another task may have completed
		   the update while we waited for it */
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}
1058
Bob Peterson42d52e32008-01-28 18:38:07 -06001059static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001060{
1061 const struct gfs2_rgrp *str = buf;
Bob Peterson42d52e32008-01-28 18:38:07 -06001062 u32 rg_flags;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001063
Bob Peterson42d52e32008-01-28 18:38:07 -06001064 rg_flags = be32_to_cpu(str->rg_flags);
Steven Whitehouse09010972009-05-20 10:48:47 +01001065 rg_flags &= ~GFS2_RDF_MASK;
Steven Whitehouse1ce97e52009-05-21 15:18:19 +01001066 rgd->rd_flags &= GFS2_RDF_MASK;
1067 rgd->rd_flags |= rg_flags;
Steven Whitehousecfc8b542008-11-04 10:25:13 +00001068 rgd->rd_free = be32_to_cpu(str->rg_free);
Steven Whitehouse73f74942008-11-04 10:32:57 +00001069 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
Steven Whitehoused8b71f72008-11-04 10:19:03 +00001070 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
Andrew Price166725d2017-12-12 11:40:05 -06001071 /* rd_data0, rd_data and rd_bitbytes already set from rindex */
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001072}
1073
Bob Peterson3f30f922018-07-26 12:59:13 -05001074static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
1075{
1076 const struct gfs2_rgrp *str = buf;
1077
1078 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
1079 rgl->rl_flags = str->rg_flags;
1080 rgl->rl_free = str->rg_free;
1081 rgl->rl_dinodes = str->rg_dinodes;
1082 rgl->rl_igeneration = str->rg_igeneration;
1083 rgl->__pad = 0UL;
1084}
1085
/* Serialize the in-core rgrp descriptor into an on-disk struct gfs2_rgrp
 * image and mirror the result into the lock value block. */
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
	struct gfs2_rgrp *str = buf;
	u32 crc;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	/* rg_skip: block distance to the next rgrp; 0 when this is the last
	   one.  Only written when the next rgrp is at a higher address --
	   otherwise the field is left untouched. */
	if (next == NULL)
		str->rg_skip = 0;
	else if (next->rd_addr > rgd->rd_addr)
		str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	str->rg_data0 = cpu_to_be64(rgd->rd_data0);
	str->rg_data = cpu_to_be32(rgd->rd_data);
	str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
	/* The CRC is computed over the whole struct with rg_crc zeroed;
	   the order of these three statements must not change */
	str->rg_crc = 0;
	crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
	str->rg_crc = cpu_to_be32(crc);

	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
	/* Keep the lock value block in sync with what goes to disk */
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
}
1110
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001111static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1112{
1113 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1114 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
Bob Petersonf29e62e2019-05-13 09:42:18 -05001115 struct gfs2_sbd *sdp = rgd->rd_sbd;
Bob Peterson72244b62018-08-15 12:09:49 -05001116 int valid = 1;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001117
Bob Peterson72244b62018-08-15 12:09:49 -05001118 if (rgl->rl_flags != str->rg_flags) {
Bob Petersonf29e62e2019-05-13 09:42:18 -05001119 fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u",
1120 (unsigned long long)rgd->rd_addr,
Bob Peterson72244b62018-08-15 12:09:49 -05001121 be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
1122 valid = 0;
1123 }
1124 if (rgl->rl_free != str->rg_free) {
Bob Petersonf29e62e2019-05-13 09:42:18 -05001125 fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u",
1126 (unsigned long long)rgd->rd_addr,
1127 be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
Bob Peterson72244b62018-08-15 12:09:49 -05001128 valid = 0;
1129 }
1130 if (rgl->rl_dinodes != str->rg_dinodes) {
Bob Petersonf29e62e2019-05-13 09:42:18 -05001131 fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
1132 (unsigned long long)rgd->rd_addr,
1133 be32_to_cpu(rgl->rl_dinodes),
1134 be32_to_cpu(str->rg_dinodes));
Bob Peterson72244b62018-08-15 12:09:49 -05001135 valid = 0;
1136 }
1137 if (rgl->rl_igeneration != str->rg_igeneration) {
Bob Petersonf29e62e2019-05-13 09:42:18 -05001138 fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu",
1139 (unsigned long long)rgd->rd_addr,
1140 (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
1141 (unsigned long long)be64_to_cpu(str->rg_igeneration));
Bob Peterson72244b62018-08-15 12:09:49 -05001142 valid = 0;
1143 }
1144 return valid;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001145}
1146
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001147static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1148{
1149 struct gfs2_bitmap *bi;
1150 const u32 length = rgd->rd_length;
1151 const u8 *buffer = NULL;
1152 u32 i, goal, count = 0;
1153
1154 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1155 goal = 0;
1156 buffer = bi->bi_bh->b_data + bi->bi_offset;
1157 WARN_ON(!buffer_uptodate(bi->bi_bh));
Andreas Gruenbacher281b4952018-09-26 23:32:46 +01001158 while (goal < bi->bi_blocks) {
1159 goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001160 GFS2_BLKST_UNLINKED);
1161 if (goal == BFITNOENT)
1162 break;
1163 count++;
1164 goal++;
1165 }
1166 }
1167
1168 return count;
1169}
1170
1171
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	/* Buffers already read in: nothing to do */
	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	/* Submit reads for all bitmap blocks first... */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* ...then wait for completion and verify the metadata types
	   (block 0 is the rgrp header, the rest are bitmap blocks) */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	/* An unset LVB magic means the lock value block has never been
	   initialized; fill it in from what we just read */
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		/* LVBs in use: the cached copy must agree with the disk */
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	/* Release the buffers submitted so far; x is length when the first
	   loop completed, so this covers errors from either loop */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}
1248
Rashika Kheriac2b0b302014-02-09 18:40:19 +05301249static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001250{
1251 u32 rl_flags;
1252
1253 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1254 return 0;
1255
Al Viro951b4bd2013-06-02 19:53:40 -04001256 if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001257 return gfs2_rgrp_bh_get(rgd);
1258
1259 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1260 rl_flags &= ~GFS2_RDF_MASK;
1261 rgd->rd_flags &= GFS2_RDF_MASK;
Bob Peterson4f36cb32018-08-16 10:32:13 -05001262 rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001263 if (rgd->rd_rgl->rl_unlinked == 0)
1264 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1265 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1266 rgd->rd_free_clone = rgd->rd_free;
1267 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1268 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1269 return 0;
1270}
1271
1272int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1273{
1274 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1275 struct gfs2_sbd *sdp = rgd->rd_sbd;
1276
1277 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1278 return 0;
Bob Peterson8b127d02014-01-16 08:52:16 -05001279 return gfs2_rgrp_bh_get(rgd);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001280}
1281
David Teiglandb3b94fa2006-01-16 16:50:04 +00001282/**
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001283 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1284 * @rgd: The resource group
David Teiglandb3b94fa2006-01-16 16:50:04 +00001285 *
1286 */
1287
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001288void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001289{
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001290 int x, length = rgd->rd_length;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001291
David Teiglandb3b94fa2006-01-16 16:50:04 +00001292 for (x = 0; x < length; x++) {
1293 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001294 if (bi->bi_bh) {
1295 brelse(bi->bi_bh);
1296 bi->bi_bh = NULL;
1297 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001298 }
1299
David Teiglandb3b94fa2006-01-16 16:50:04 +00001300}
1301
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001302/**
1303 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1304 * @gh: The glock holder for the resource group
1305 *
1306 */
1307
1308void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1309{
1310 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1311 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1312 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1313
1314 if (rgd && demote_requested)
1315 gfs2_rgrp_brelse(rgd);
1316}
1317
/**
 * gfs2_rgrp_send_discards - issue discard requests for freed blocks
 * @sdp: the filesystem
 * @offset: the starting block address (callers pass rgd->rd_data0)
 * @bh: if non-NULL, the on-disk bitmap buffer to diff against; only
 *      blocks that are free in @bh but not free in the clone bitmap
 *      are discarded.  If NULL, all free blocks in @bi are discarded.
 * @bi: the bitmap to scan
 * @minlen: only discard extents of at least this many blocks
 * @ptrimmed: if non-NULL, receives the total number of blocks discarded
 *
 * Walks the bitmap a byte (= GFS2_NBBY block states) at a time,
 * coalescing adjacent discardable blocks into extents so that one
 * sb_issue_discard() call covers each contiguous run.
 *
 * Returns: 0 on success, -EIO if a discard request fails (discards are
 * then disabled for this filesystem).
 */
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;	/* length of the extent being accumulated */
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_bytes; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			/* (*p | *p >> 1) has the low bit of each 2-bit block
			   state set iff that state is non-zero (not FREE), so
			   diff marks blocks free on disk but not in the clone. */
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		/* Keep only the low bit of each 2-bit state (4 blocks/byte). */
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while(diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					/* Not contiguous: flush the extent
					   accumulated so far, if big enough. */
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	/* Flush the final extent, if any. */
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}
1384
1385/**
1386 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1387 * @filp: Any file on the filesystem
1388 * @argp: Pointer to the arguments (also used to pass result)
1389 *
1390 * Returns: 0 on success, otherwise error code
1391 */
1392
1393int gfs2_fitrim(struct file *filp, void __user *argp)
1394{
Al Viro496ad9a2013-01-23 17:07:38 -05001395 struct inode *inode = file_inode(filp);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001396 struct gfs2_sbd *sdp = GFS2_SB(inode);
1397 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1398 struct buffer_head *bh;
1399 struct gfs2_rgrpd *rgd;
1400 struct gfs2_rgrpd *rgd_end;
1401 struct gfs2_holder gh;
1402 struct fstrim_range r;
1403 int ret = 0;
1404 u64 amt;
1405 u64 trimmed = 0;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001406 u64 start, end, minlen;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001407 unsigned int x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001408 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001409
1410 if (!capable(CAP_SYS_ADMIN))
1411 return -EPERM;
1412
1413 if (!blk_queue_discard(q))
1414 return -EOPNOTSUPP;
1415
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001416 if (copy_from_user(&r, argp, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001417 return -EFAULT;
1418
Bob Peterson5e2f7d62012-04-04 22:11:16 -04001419 ret = gfs2_rindex_update(sdp);
1420 if (ret)
1421 return ret;
1422
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001423 start = r.start >> bs_shift;
1424 end = start + (r.len >> bs_shift);
1425 minlen = max_t(u64, r.minlen,
1426 q->limits.discard_granularity) >> bs_shift;
1427
Abhijith Das6a98c332013-06-19 17:03:29 -04001428 if (end <= start || minlen > sdp->sd_max_rg_data)
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001429 return -EINVAL;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001430
Abhijith Das6a98c332013-06-19 17:03:29 -04001431 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1432 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1433
1434 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1435 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1436 return -EINVAL; /* start is beyond the end of the fs */
1437
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001438 while (1) {
1439
1440 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1441 if (ret)
1442 goto out;
1443
1444 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1445 /* Trim each bitmap in the rgrp */
1446 for (x = 0; x < rgd->rd_length; x++) {
1447 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001448 ret = gfs2_rgrp_send_discards(sdp,
1449 rgd->rd_data0, NULL, bi, minlen,
1450 &amt);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001451 if (ret) {
1452 gfs2_glock_dq_uninit(&gh);
1453 goto out;
1454 }
1455 trimmed += amt;
1456 }
1457
1458 /* Mark rgrp as having been trimmed */
1459 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1460 if (ret == 0) {
1461 bh = rgd->rd_bits[0].bi_bh;
1462 rgd->rd_flags |= GFS2_RGF_TRIMMED;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00001463 gfs2_trans_add_meta(rgd->rd_gl, bh);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001464 gfs2_rgrp_out(rgd, bh->b_data);
1465 gfs2_trans_end(sdp);
1466 }
1467 }
1468 gfs2_glock_dq_uninit(&gh);
1469
1470 if (rgd == rgd_end)
1471 break;
1472
1473 rgd = gfs2_rgrpd_get_next(rgd);
1474 }
1475
1476out:
Abhijith Das6a98c332013-06-19 17:03:29 -04001477 r.len = trimmed << bs_shift;
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001478 if (copy_to_user(argp, &r, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001479 return -EFAULT;
1480
1481 return ret;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001482}
1483
/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 * The reservation to insert is ip->i_res; its position (rs_rbm) and
 * length (rs_free) must already be filled in.  The tree is keyed by
 * starting block via rs_cmp(), so an rs_cmp() result of 0 means the
 * new reservation overlaps an existing one — which should never happen.
 * rd_rsspin protects both the tree and the rd_reserved count.
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			/* Overlap with an existing reservation: warn and
			   bail out without inserting. */
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
1526
1527/**
Bob Petersonf6753df2018-05-30 14:05:15 -05001528 * rgd_free - return the number of free blocks we can allocate.
1529 * @rgd: the resource group
1530 *
1531 * This function returns the number of free blocks for an rgrp.
1532 * That's the clone-free blocks (blocks that are free, not including those
1533 * still being used for unlinked files that haven't been deleted.)
1534 *
1535 * It also subtracts any blocks reserved by someone else, but does not
1536 * include free blocks that are still part of our current reservation,
1537 * because obviously we can (and will) allocate them.
1538 */
1539static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
1540{
1541 u32 tot_reserved, tot_free;
1542
1543 if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
1544 return 0;
1545 tot_reserved = rgd->rd_reserved - rs->rs_free;
1546
1547 if (rgd->rd_free_clone < tot_reserved)
1548 tot_reserved = 0;
1549
1550 tot_free = rgd->rd_free_clone - tot_reserved;
1551
1552 return tot_free;
1553}
1554
/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @ap: the allocation parameters
 *
 * On success, a new reservation covering the free extent found is
 * inserted into the rgrp's reservation tree via rs_insert().  On
 * failure no reservation is made.
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   const struct gfs2_alloc_parms *ap)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = &ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd_free(rgd, rs);
	int ret;
	struct inode *inode = &ip->i_inode;

	/* Directories get single-block reservations; regular files size the
	   reservation from the inode's size hint and the requested target,
	   bounded by RGRP_RSRV_MINBLKS and what the rgrp can provide. */
	if (S_ISDIR(inode->i_mode))
		extlen = 1;
	else {
		extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
		extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
	}
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs_insert(ip);
	} else {
		/* Search from the goal found nothing: reset rd_last_alloc so
		   the next search starts at the beginning of the rgrp. */
		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
			rgd->rd_last_alloc = 0;
	}
}
1602
/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 *
 * Holds rd_rsspin while walking the tree; our own reservation
 * (&ip->i_res) is skipped rather than treated as a conflict.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	/* Binary search for a reservation overlapping [block, block+length). */
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		/* Step rightwards past each overlapping reservation (other
		   than our own), advancing block to just past it each time. */
		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}
1650
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * As a side effect, @maxext is updated to remember the largest
 * too-small extent seen so far, so a caller can fall back to it.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext,
					     struct gfs2_extent *maxext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		if (extlen <= maxext->len)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		/* Extent is unreserved; accept it if long enough. */
		if (!minext || extlen >= minext)
			return 0;

		/* Too small: remember it as the best candidate so far. */
		if (extlen > maxext->len) {
			maxext->len = extlen;
			maxext->rbm = *rbm;
		}
fail:
		/* Resume the search just past this extent. */
		nblock = block + extlen;
	}
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}
1708
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
	struct buffer_head *bh;
	int last_bii;
	u32 offset;
	u8 *buffer;
	bool wrapped = false;
	int ret;
	struct gfs2_bitmap *bi;
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

	/*
	 * Determine the last bitmap to search.  If we're not starting at the
	 * beginning of a bitmap, we need to search that bitmap twice to scan
	 * the entire resource group.
	 */
	last_bii = rbm->bii - (rbm->offset == 0);

	while(1) {
		bi = rbm_bi(rbm);
		/* Skip bitmaps already known to have no free blocks, unless
		   the caller has an active reservation in this rgrp. */
		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
		    test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = bi->bi_bh;
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		/* The clone bitmap holds in-transaction state for all block
		   states except UNLINKED searches. */
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
		if (offset == BFITNOENT) {
			/* A full scan of this bitmap found nothing free. */
			if (state == GFS2_BLKST_FREE && rbm->offset == 0)
				set_bit(GBF_FULL, &bi->bi_flags);
			goto next_bitmap;
		}
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
							&maxext);
		if (ret == 0)
			return 0;
		if (ret > 0)
			goto next_iter;
		if (ret == -E2BIG) {
			/* Reservation(s) cover the rest of the rgrp; restart
			   from the first bitmap. */
			rbm->bii = 0;
			rbm->offset = 0;
			goto res_covered_end_of_rgrp;
		}
		return ret;

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		rbm->bii++;
		if (rbm->bii == rbm->rgd->rd_length)
			rbm->bii = 0;
res_covered_end_of_rgrp:
		if (rbm->bii == 0) {
			if (wrapped)
				break;
			wrapped = true;
			if (nowrap)
				break;
		}
next_iter:
		/* Have we scanned the entire resource group? */
		if (wrapped && rbm->bii > last_bii)
			break;
	}

	if (minext == NULL || state != GFS2_BLKST_FREE)
		return -ENOSPC;

	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
	    *minext < rbm->rgd->rd_extfail_pt)
		rbm->rgd->rd_extfail_pt = *minext;

	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
	if (maxext.len) {
		*rbm = maxext.rbm;
		*minext = maxext.len;
		return 0;
	}

	return -ENOSPC;
}
1823
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Scans the rgrp's bitmaps for dinodes in the GFS2_BLKST_UNLINKED state
 * and queues delete work for any that are not already cached, so their
 * blocks can eventually be reclaimed.  Clears GFS2_RDF_CHECK on the
 * rgrp once the scan has run to completion.  (Returns nothing; progress
 * is recorded through *last_unlinked.)
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

	while (1) {
		/* Hold off log flushes while searching the bitmaps. */
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
				      true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)	/* no more unlinked dinodes */
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		/* Advance past this block for the next iteration. */
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}
1889
/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_lkstats *st;
	u64 r_dcount, l_dcount;
	u64 l_srttb, a_srttb = 0;
	s64 srttb_diff;
	u64 sqr_diff;
	u64 var;
	int cpu, nonzero = 0;

	preempt_disable();
	/* Average the per-CPU rgrp SRTTB over CPUs that have data. */
	for_each_present_cpu(cpu) {
		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
		if (st->stats[GFS2_LKS_SRTTB]) {
			a_srttb += st->stats[GFS2_LKS_SRTTB];
			nonzero++;
		}
	}
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	if (nonzero)
		do_div(a_srttb, nonzero);
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	/* Without at least one sample on each side, no verdict is possible. */
	if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
		return false;

	srttb_diff = a_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	/* Widen the margin when data is sparse or we're on a later loop. */
	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}
1962
1963/**
1964 * gfs2_rgrp_used_recently
1965 * @rs: The block reservation with the rgrp to test
1966 * @msecs: The time limit in milliseconds
1967 *
1968 * Returns: True if the rgrp glock has been used within the time limit
1969 */
1970static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1971 u64 msecs)
1972{
1973 u64 tdiff;
1974
1975 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1976 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1977
1978 return tdiff > (msecs * 1000 * 1000);
1979}
1980
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001981static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1982{
1983 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1984 u32 skip;
1985
1986 get_random_bytes(&skip, sizeof(skip));
1987 return skip % sdp->sd_rgrps;
1988}
1989
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001990static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1991{
1992 struct gfs2_rgrpd *rgd = *pos;
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001993 struct gfs2_sbd *sdp = rgd->rd_sbd;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001994
1995 rgd = gfs2_rgrpd_get_next(rgd);
1996 if (rgd == NULL)
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001997 rgd = gfs2_rgrpd_get_first(sdp);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001998 *pos = rgd;
1999 if (rgd != begin) /* If we didn't wrap */
2000 return true;
2001 return false;
2002}
2003
Steven Whitehousec8cdf472007-06-08 10:05:33 +01002004/**
Bob Peterson0e27c182014-10-29 08:02:28 -05002005 * fast_to_acquire - determine if a resource group will be fast to acquire
2006 *
2007 * If this is one of our preferred rgrps, it should be quicker to acquire,
2008 * because we tried to set ourselves up as dlm lock master.
2009 */
2010static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
2011{
2012 struct gfs2_glock *gl = rgd->rd_gl;
2013
2014 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
2015 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
2016 !test_bit(GLF_DEMOTE, &gl->gl_flags))
2017 return 1;
2018 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
2019 return 1;
2020 return 0;
2021}
2022
/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @ap: the allocation parameters
 *
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
 * at least ap->min_target blocks available. Either way, we set ap->allowed to
 * the number of blocks available in the chosen rgrp.
 *
 * Returns: 0 on success,
 *          -ENOSPC if a suitable rgrp can't be found
 *          errno otherwise
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = &ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;	/* number of complete passes over all rgrps */
	u32 free_blocks, skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, ap->target))
		return -EINVAL;
	/* Choose the starting rgrp: active reservation, goal block's rgrp,
	   or (re)look up the rgrp containing the inode's goal block. */
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
	} else if (rs->rs_rbm.rgd &&
		   rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
		begin = rs->rs_rbm.rgd;
	} else {
		check_and_update_goal(ip);
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	/* Orlov-style spreading for directories: skip a random number of
	   rgrps before settling on one. */
	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			/* Without a reservation, avoid rgrps that look slow
			   or congested on the early passes. */
			if (!gfs2_rs_active(rs)) {
				if (loops == 0 &&
				    !fast_to_acquire(rs->rs_rbm.rgd))
					goto next_rgrp;
				if ((loops < 2) &&
				    gfs2_rgrp_used_recently(rs, 1000) &&
				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
					goto next_rgrp;
			}
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &ip->i_rgd_gh);
			if (unlikely(error))
				return error;
			/* Re-check congestion now that we hold the glock. */
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&ip->i_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
						 GFS2_RDF_ERROR)) ||
		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it; on the last pass
		   accept min_target instead of target. */
		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
		if (free_blocks >= ap->target ||
		    (loops == 2 && ap->min_target &&
		     free_blocks >= ap->min_target)) {
			ap->allowed = free_blocks;
			return 0;
		}
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(rs);

		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&ip->i_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_INPLACE_RESERVE);
	}

	return -ENOSPC;
}
2164
2165/**
2166 * gfs2_inplace_release - release an inplace reservation
2167 * @ip: the inode the reservation was taken out on
2168 *
2169 * Release a reservation made by gfs2_inplace_reserve().
2170 */
2171
2172void gfs2_inplace_release(struct gfs2_inode *ip)
2173{
Andreas Gruenbacher21f09c42018-08-30 16:01:50 +01002174 if (gfs2_holder_initialized(&ip->i_rgd_gh))
2175 gfs2_glock_dq_uninit(&ip->i_rgd_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002176}
2177
2178/**
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002179 * gfs2_alloc_extent - allocate an extent from a given bitmap
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002180 * @rbm: the resource group information
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002181 * @dinode: TRUE if the first block we allocate is for a dinode
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002182 * @n: The extent length (value/result)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002183 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002184 * Add the bitmap buffer to the transaction.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002185 * Set the found bits to @new_state to change block's allocation state.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002186 */
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002187static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002188 unsigned int *n)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002189{
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002190 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002191 const unsigned int elen = *n;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002192 u64 block;
2193 int ret;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002194
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002195 *n = 1;
2196 block = gfs2_rbm_to_block(rbm);
Bob Petersone579ed42013-09-17 13:12:15 -04002197 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002198 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002199 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002200 while (*n < elen) {
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002201 ret = gfs2_rbm_from_block(&pos, block);
Bob Petersondffe12a2018-08-07 10:07:00 -05002202 if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002203 break;
Bob Petersone579ed42013-09-17 13:12:15 -04002204 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002205 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002206 (*n)++;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002207 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002208 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002209}
2210
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @rgd: the resource group the blocks are in
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walks the run one block at a time, setting each bitmap bit to
 * @new_state.  Each bitmap buffer touched is added to the current
 * transaction, and a clone of its pre-change contents is made lazily
 * (the clone presumably preserves the old allocation state until the
 * log is flushed — see bi_clone usage elsewhere; confirm).
 */

static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
		       u64 bstart, u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;
	struct gfs2_bitmap *bi, *bi_prev = NULL;

	rbm.rgd = rgd;
	/* bstart must lie within @rgd; bail (with a warning) if not. */
	if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
		return;
	while (blen--) {
		bi = rbm_bi(&rbm);
		/* Only re-clone/re-journal when we cross into a new bitmap
		   buffer; bi_prev avoids redundant work within one buffer. */
		if (bi != bi_prev) {
			if (!bi->bi_clone) {
				/* __GFP_NOFAIL: allocation cannot fail here */
				bi->bi_clone = kmalloc(bi->bi_bh->b_size,
						GFP_NOFS | __GFP_NOFAIL);
				memcpy(bi->bi_clone + bi->bi_offset,
				       bi->bi_bh->b_data + bi->bi_offset,
				       bi->bi_bytes);
			}
			gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
			bi_prev = bi;
		}
		gfs2_setbit(&rbm, false, new_state);
		gfs2_rbm_incr(&rbm);
	}
}
2246
/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator (may be NULL — gfs2_rgrp_error() passes NULL;
 *       presumably gfs2_print_dbg then logs to the console — confirm)
 * @gl: The glock in question
 * @fs_id_buf: pointer to file system id (if requested)
 *
 * Prints the rgrp's counters, its lock value block (if rgrplvb is in
 * use), and every block reservation attached to it.
 */

void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
		    const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	/* The glock may no longer be attached to an rgrp. */
	if (rgd == NULL)
		return;
	gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
		       fs_id_buf,
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved, rgd->rd_extfail_pt);
	if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
		struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;

		gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
			       be32_to_cpu(rgl->rl_flags),
			       be32_to_cpu(rgl->rl_free),
			       be32_to_cpu(rgl->rl_dinodes));
	}
	/* rd_rsspin protects the reservation rb-tree during the walk. */
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs, fs_id_buf);
	}
	spin_unlock(&rgd->rd_rsspin);
}
2284
/*
 * gfs2_rgrp_error - mark a resource group as bad and dump its state
 * @rgd: the resource group with the inconsistency
 *
 * Warns, dumps the rgrp (to the log, since seq is NULL) and sets
 * GFS2_RDF_ERROR so gfs2_inplace_reserve() skips this rgrp from now on.
 */
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	/* + 7 covers the "fsid=" prefix, ": " suffix and the NUL. */
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}
2297
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = &ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	/* rd_rsspin protects rs_free/rd_reserved and the rs tree. */
	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		/* Only shrink the reservation if the allocation came from
		   its exact starting position. */
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			/* Advance the reservation past the allocated run;
			   ret != 0 means the new start is outside the rgrp. */
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			/* Keep the reservation if blocks remain and the new
			   start position is valid. */
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
		}
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
2338
2339/**
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002340 * gfs2_set_alloc_start - Set starting point for block allocation
2341 * @rbm: The rbm which will be set to the required location
2342 * @ip: The gfs2 inode
2343 * @dinode: Flag to say if allocation includes a new inode
2344 *
2345 * This sets the starting point from the reservation if one is active
2346 * otherwise it falls back to guessing a start point based on the
2347 * inode's goal block or the last allocation point in the rgrp.
2348 */
2349
2350static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2351 const struct gfs2_inode *ip, bool dinode)
2352{
2353 u64 goal;
2354
Bob Petersona097dc7e2015-07-16 08:28:04 -05002355 if (gfs2_rs_active(&ip->i_res)) {
2356 *rbm = ip->i_res.rs_rbm;
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002357 return;
2358 }
2359
2360 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2361 goal = ip->i_goal;
2362 else
2363 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2364
Andreas Gruenbacherf6546832018-09-10 17:31:47 +01002365 if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
2366 rbm->bii = 0;
2367 rbm->offset = 0;
2368 }
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002369}
2370
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	/* First attempt: search from the reservation/goal, restricted to
	   regions this inode may use (ip passed in). */
	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);

	/* Retry ignoring other inodes' reservations (NULL instead of ip). */
	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	/* Mark the extent in the bitmap; *nblocks becomes the actual
	   (possibly shorter) contiguous length allocated. */
	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(&ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	/* ndata = number of data blocks (the first block may be a dinode). */
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		/* Update the inode's goal block, on disk as well. */
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	/* Sanity check: the rgrp must account for at least this many
	   free blocks, otherwise it is corrupt. */
	if (rbm.rgd->rd_free < *nblocks) {
		fs_warn(sdp, "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		/* Generation 0 is reserved; skip it. */
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	/* Journal the updated rgrp header. */
	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_remove_revoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
2462
/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @rgd: the resource group the blocks are in
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 * Marks the blocks free in the bitmap, updates the rgrp's free count,
 * journals the rgrp header, and wipes cached metadata buffers for the
 * freed range where applicable.
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
			u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	/* Freed blocks may need a fresh discard, so clear TRIMMED. */
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}
David Teiglandb3b94fa2006-01-16 16:50:04 +00002489
/**
 * gfs2_free_meta - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @rgd: the resource group the blocks are in
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Thin wrapper around __gfs2_free_blocks() (with meta = 1) that also
 * updates the statfs counters and the inode's quota.
 */

void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
		    u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, rgd, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
2508
/*
 * gfs2_unlink_di - mark an inode's dinode block as unlinked
 * @inode: the inode being unlinked
 *
 * Sets the dinode's block state to GFS2_BLKST_UNLINKED in the bitmap
 * (reclaimed later via try_rgrp_unlink()), journals the rgrp header and
 * bumps the LVB unlinked counter.
 */
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = gfs2_blk2rgrpd(sdp, blkno, true);
	/* No rgrp found for this block: nothing we can do. */
	if (!rgd)
		return;
	rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}
2525
/*
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group containing the dinode
 * @ip: the inode whose dinode block is freed
 *
 * Returns the dinode block to the free pool, adjusting the rgrp's
 * dinode/free counts, the statfs counters, the quota, the LVB unlinked
 * counter, and wiping the cached metadata for the block.
 */
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	/* A zero dinode count here means the rgrp is inconsistent. */
	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
2545
2546/**
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002547 * gfs2_check_blk_type - Check the type of a block
2548 * @sdp: The superblock
2549 * @no_addr: The block number to check
2550 * @type: The block type we are looking for
2551 *
2552 * Returns: 0 if the block type matches the expected type
2553 * -ESTALE if it doesn't match
2554 * or -ve errno if something went wrong while checking
2555 */
2556
2557int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2558{
2559 struct gfs2_rgrpd *rgd;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002560 struct gfs2_holder rgd_gh;
Bob Petersondffe12a2018-08-07 10:07:00 -05002561 struct gfs2_rbm rbm;
Bob Peterson58884c42012-03-05 10:19:35 -05002562 int error = -EINVAL;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002563
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002564 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002565 if (!rgd)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002566 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002567
2568 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2569 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002570 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002571
Bob Petersondffe12a2018-08-07 10:07:00 -05002572 rbm.rgd = rgd;
2573 error = gfs2_rbm_from_block(&rbm, no_addr);
Andreas Gruenbacherf6546832018-09-10 17:31:47 +01002574 if (WARN_ON_ONCE(error))
2575 goto fail;
Bob Petersondffe12a2018-08-07 10:07:00 -05002576
2577 if (gfs2_testbit(&rbm, false) != type)
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002578 error = -ESTALE;
2579
2580 gfs2_glock_dq_uninit(&rgd_gh);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002581fail:
2582 return error;
2583}
2584
2585/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00002586 * gfs2_rlist_add - add a RG to a list of RGs
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002587 * @ip: the inode
David Teiglandb3b94fa2006-01-16 16:50:04 +00002588 * @rlist: the list of resource groups
2589 * @block: the block
2590 *
2591 * Figure out what RG a block belongs to and add that RG to the list
2592 *
2593 * FIXME: Don't use NOFAIL
2594 *
2595 */
2596
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002597void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
Steven Whitehousecd915492006-09-04 12:49:07 -04002598 u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002599{
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002600 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002601 struct gfs2_rgrpd *rgd;
2602 struct gfs2_rgrpd **tmp;
2603 unsigned int new_space;
2604 unsigned int x;
2605
2606 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2607 return;
2608
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002609 /*
2610 * The resource group last accessed is kept in the last position.
2611 */
2612
2613 if (rlist->rl_rgrps) {
2614 rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
2615 if (rgrp_contains_block(rgd, block))
2616 return;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002617 rgd = gfs2_blk2rgrpd(sdp, block, 1);
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002618 } else {
Andreas Gruenbacherb7eba892018-06-21 07:42:37 -05002619 rgd = ip->i_res.rs_rbm.rgd;
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002620 if (!rgd || !rgrp_contains_block(rgd, block))
2621 rgd = gfs2_blk2rgrpd(sdp, block, 1);
2622 }
2623
David Teiglandb3b94fa2006-01-16 16:50:04 +00002624 if (!rgd) {
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002625 fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
2626 (unsigned long long)block);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002627 return;
2628 }
2629
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002630 for (x = 0; x < rlist->rl_rgrps; x++) {
2631 if (rlist->rl_rgd[x] == rgd) {
2632 swap(rlist->rl_rgd[x],
2633 rlist->rl_rgd[rlist->rl_rgrps - 1]);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002634 return;
Andreas Gruenbacher03f8c412018-06-21 07:22:12 -05002635 }
2636 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002637
2638 if (rlist->rl_rgrps == rlist->rl_space) {
2639 new_space = rlist->rl_space + 10;
2640
2641 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002642 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002643
2644 if (rlist->rl_rgd) {
2645 memcpy(tmp, rlist->rl_rgd,
2646 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2647 kfree(rlist->rl_rgd);
2648 }
2649
2650 rlist->rl_space = new_space;
2651 rlist->rl_rgd = tmp;
2652 }
2653
2654 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2655}
2656
2657/**
2658 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2659 * and initialize an array of glock holders for them
2660 * @rlist: the list of resource groups
David Teiglandb3b94fa2006-01-16 16:50:04 +00002661 *
2662 * FIXME: Don't use NOFAIL
2663 *
2664 */
2665
Bob Petersonc3abc292018-10-04 00:06:23 +01002666void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002667{
2668 unsigned int x;
2669
Kees Cook6da2ec52018-06-12 13:55:00 -07002670 rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
2671 sizeof(struct gfs2_holder),
2672 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002673 for (x = 0; x < rlist->rl_rgrps; x++)
2674 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
Bob Petersonc3abc292018-10-04 00:06:23 +01002675 LM_ST_EXCLUSIVE, 0,
David Teiglandb3b94fa2006-01-16 16:50:04 +00002676 &rlist->rl_ghs[x]);
2677}
2678
2679/**
2680 * gfs2_rlist_free - free a resource group list
Fabian Frederick27ff6a02014-07-02 22:05:27 +02002681 * @rlist: the list of resource groups
David Teiglandb3b94fa2006-01-16 16:50:04 +00002682 *
2683 */
2684
2685void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2686{
2687 unsigned int x;
2688
2689 kfree(rlist->rl_rgd);
2690
2691 if (rlist->rl_ghs) {
2692 for (x = 0; x < rlist->rl_rgrps; x++)
2693 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2694 kfree(rlist->rl_ghs);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002695 rlist->rl_ghs = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002696 }
2697}
2698