/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_rmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"

/*
 * File system operations
 */

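/*
 * Allocate an uncached, zeroed buffer for a new AG header block, point it at
 * the given disk address and attach the verifier ops for that header type.
 * The caller is responsible for writing out and releasing the buffer.
 */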
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}

/*
 * Write new AG headers to disk. Non-transactional, but written
 * synchronously so they are completed prior to the growfs transaction
 * being logged.
 */
static int
xfs_grow_ag_headers(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		agsize,
	xfs_rfsblock_t		*nfree)
{
	struct xfs_agf		*agf;
	struct xfs_agi		*agi;
	struct xfs_agfl		*agfl;
	__be32			*agfl_bno;
	xfs_alloc_rec_t		*arec;
	struct xfs_buf		*bp;
	int			bucket;
	xfs_extlen_t		tmpsize;
	int			error = 0;

	/*
	 * AG freespace header block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0,
			&xfs_agf_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	agf = XFS_BUF_TO_AGF(bp);
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);
	agf->agf_length = cpu_to_be32(agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/*
	 * AG freelist header block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0,
			&xfs_agfl_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	agfl = XFS_BUF_TO_AGFL(bp);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/*
	 * AG inode header block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0,
			&xfs_agi_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	agi = XFS_BUF_TO_AGI(bp);
	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(agno);
	agi->agi_length = cpu_to_be32(agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/*
	 * BNO btree root block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_allocbt_buf_ops);

	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, agno, 0);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(
		agsize - be32_to_cpu(arec->ar_startblock));

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/*
	 * CNT btree root block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_allocbt_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, agno, 0);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(
		agsize - be32_to_cpu(arec->ar_startblock));
	*nfree += be32_to_cpu(arec->ar_blockcount);

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/* RMAP btree root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		struct xfs_rmap_rec	*rrec;
		struct xfs_btree_block	*block;

		bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_RMAP_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_rmapbt_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto out_error;
		}

		xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 0,
					agno, 0);
		block = XFS_BUF_TO_BLOCK(bp);

		/*
		 * Mark the AG header regions as static metadata.  The BNO
		 * btree block is the first block after the headers, so its
		 * location defines the size of the region the static
		 * metadata consumes.
		 *
		 * Note: unlike mkfs, we never have to account for log
		 * space when growing the data regions.
		 */
		rrec = XFS_RMAP_REC_ADDR(block, 1);
		rrec->rm_startblock = 0;
		rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account freespace btree root blocks */
		rrec = XFS_RMAP_REC_ADDR(block, 2);
		rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(2);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account inode btree root blocks */
		rrec = XFS_RMAP_REC_ADDR(block, 3);
		rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
						XFS_IBT_BLOCK(mp));
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account for rmap btree root */
		rrec = XFS_RMAP_REC_ADDR(block, 4);
		rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account for refc btree root */
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			rrec = XFS_RMAP_REC_ADDR(block, 5);
			rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
			rrec->rm_blockcount = cpu_to_be32(1);
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);
		}

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto out_error;
	}

	/*
	 * INO btree root block
	 */
	bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_inobt_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto out_error;
	}

	xfs_btree_init_block(mp, bp, XFS_BTNUM_INO, 0, 0, agno, 0);

	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	if (error)
		goto out_error;

	/*
	 * FINO btree root block
	 */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_FIBT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_inobt_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto out_error;
		}

		xfs_btree_init_block(mp, bp, XFS_BTNUM_FINO,
				     0, 0, agno, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto out_error;
	}

	/*
	 * refcount btree root block
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		bp = xfs_growfs_get_hdr_buf(mp,
			XFS_AGB_TO_DADDR(mp, agno, xfs_refc_block(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0,
			&xfs_refcountbt_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto out_error;
		}

		xfs_btree_init_block(mp, bp, XFS_BTNUM_REFC,
				     0, 0, agno, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto out_error;
	}

out_error:
	return error;
}

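/*
 * Grow the data section of the filesystem.  The headers for any new AGs are
 * written straight to disk, the old last AG is extended if it gains blocks,
 * the new geometry is committed to the primary superblock in a synchronous
 * transaction, and finally all secondary superblocks are rewritten.
 */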
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_buf_t		*bp;
	int			dpct;
	int			error, saved_error = 0;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return -EINVAL;
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error)
		return error;
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return -EINVAL;
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {

		if (agno == nagcount - 1)
			agsize = nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;

		error = xfs_grow_ag_headers(mp, agno, agsize, &nfree);
		if (error)
			goto error0;
	}
	xfs_trans_agblocks_delta(tp, nfree);

	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		struct xfs_owner_info	oinfo;

		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

		/*
		 * Free the new space.
		 *
		 * XFS_RMAP_OWN_NULL is used here to tell the rmap btree that
		 * this doesn't actually exist in the rmap btree.
		 */
		xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
		error = xfs_rmap_free(tp, bp, agno,
				      be32_to_cpu(agf->agf_length) - new,
				      new, &oinfo);
		if (error)
			goto error0;
		error = xfs_free_extent(tp,
				XFS_AGB_TO_FSB(mp, agno,
					be32_to_cpu(agf->agf_length) - new),
				new, &oinfo, XFS_AG_RESV_NONE);
		if (error)
			goto error0;
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	/*
	 * If we expanded the last AG, free the per-AG reservation
	 * so we can reinitialize it with the new size.
	 */
	if (new) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, agno);
		error = xfs_ag_resv_free(pag);
		xfs_perag_put(pag);
		if (error)
			goto out;
	}

	/* Reserve AG metadata blocks. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		goto out;

	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = 0;
		/*
		 * new secondary superblocks need to be zeroed, not read from
		 * disk as the contents of the new area we are growing into is
		 * completely unknown.
		 */
		if (agno < oagcount) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
				  &xfs_sb_buf_ops);
		} else {
			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0);
			if (bp) {
				bp->b_ops = &xfs_sb_buf_ops;
				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
			} else
				error = -ENOMEM;
		}

		/*
		 * If we get an error reading or writing alternate superblocks,
		 * continue.  xfs_repair chooses the "best" superblock based
		 * on most matches; if we break early, we'll leave more
		 * superblocks un-updated than updated, and xfs_repair may
		 * pick them over the properly-updated primary.
		 */
		if (error) {
			xfs_warn(mp,
		"error %d reading secondary superblock for ag %d",
				error, agno);
			saved_error = error;
			continue;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error) {
			xfs_warn(mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			saved_error = error;
			continue;
		}
	}

 out:
	return saved_error ? saved_error : error;

 error0:
	xfs_trans_cancel(tp);
	return error;
}

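/*
 * Grow the log section of the filesystem.  Only the new size is validated
 * here; resizing or relocating an existing log is not yet implemented, so
 * any actual change is rejected with -ENOSYS.
 */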
static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

/*
 * Protected versions of the growfs functions acquire and release locks on
 * the mount point - exported through ioctls: XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG, XFS_IOC_FSGROWFSRT
 */

int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_data_private(mp, in);
	/*
	 * Increment the generation unconditionally; the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

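/*
 * Fill out the free space and inode counters for userspace.  The per-cpu
 * counters are sampled without taking the superblock lock, so those values
 * are only approximate; the realtime extent count is read under m_sb_lock.
 */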
int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;

	spin_lock(&mp->m_sb_lock);
	cnt->freertx = mp->m_sb.sb_frextents;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	uint64_t		request;
	int64_t			free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out if we are freeing or allocating blocks first, then we
	 * can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 */
	error = -ENOSPC;
	do {
		free = percpu_counter_sum(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;
		if (!free)
			break;

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0)
			/* We can't satisfy the request, just get what we can */
			fdblks_delta = free;
		else
			fdblks_delta = delta;

		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  If we get an ENOSPC, it means
		 * things changed while we were calculating fdblks_delta and so
		 * we should try again to see if there is anything left to
		 * reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	} while (error == -ENOSPC);

	/*
	 * Update the reserve counters if blocks have been successfully
	 * allocated.
	 */
	if (!error && fdblks_delta) {
		mp->m_resblks += fdblks_delta;
		mp->m_resblks_avail += fdblks_delta;
	}

out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

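/*
 * Force a shutdown at the request of userspace, optionally freezing the
 * block device first so that dirty data is flushed out before the filesystem
 * is marked dead.
 */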
int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		logerror;

	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s.  Return address = "PTR_FMT,
			__func__, flags, lnnum, fname, __return_address);
	}
	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
	"Corruption of in-memory data detected.  Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		if (logerror) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
		"Log I/O Error Detected.  Shutting down filesystem");
		} else if (flags & SHUTDOWN_DEVICE_REQ) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"All device paths lost.  Shutting down filesystem");
		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"I/O Error Detected. Shutting down filesystem");
		}
	}
	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_alert(mp,
	"Please umount the filesystem and rectify the problem(s)");
	}
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		err2 = xfs_ag_resv_init(pag);
		xfs_perag_put(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		err2 = xfs_ag_resv_free(pag);
		xfs_perag_put(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}