Dave Chinner | 0b61f8a | 2018-06-05 19:42:14 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2010 Red Hat, Inc. |
| 4 | * All Rights Reserved. |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 5 | */ |
| 6 | #include "xfs.h" |
Darrick J. Wong | 5467b34 | 2019-06-28 19:25:35 -0700 | [diff] [blame] | 7 | #include "xfs_shared.h" |
Dave Chinner | 6ca1c90 | 2013-08-12 20:49:26 +1000 | [diff] [blame] | 8 | #include "xfs_format.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 9 | #include "xfs_log_format.h" |
| 10 | #include "xfs_trans_resv.h" |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 11 | #include "xfs_mount.h" |
Dave Chinner | a4fbe6a | 2013-10-23 10:51:50 +1100 | [diff] [blame] | 12 | #include "xfs_btree.h" |
| 13 | #include "xfs_alloc_btree.h" |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 14 | #include "xfs_alloc.h" |
Darrick J. Wong | 5f213dd | 2019-11-06 17:19:33 -0800 | [diff] [blame] | 15 | #include "xfs_discard.h" |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 16 | #include "xfs_error.h" |
Dave Chinner | efc27b5 | 2012-04-29 10:39:43 +0000 | [diff] [blame] | 17 | #include "xfs_extent_busy.h" |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 18 | #include "xfs_trace.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 19 | #include "xfs_log.h" |
Dave Chinner | 9bbafc71 | 2021-06-02 10:48:24 +1000 | [diff] [blame] | 20 | #include "xfs_ag.h" |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 21 | |
/*
 * Discard (TRIM) all sufficiently large free extents in a single AG that
 * overlap the caller's disk address range.
 *
 * @start, @end and @minlen are in 512-byte basic blocks (daddr units),
 * matching the format xfs_ioc_trim() derives from the userspace byte
 * values.  Returns 0 or a negative errno; the number of filesystem blocks
 * discarded is accumulated into *blocks_trimmed.
 */
STATIC int
xfs_trim_extents(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_daddr_t		minlen,
	uint64_t		*blocks_trimmed)
{
	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	int			error;
	int			i;

	pag = xfs_perag_get(mp, agno);

	/*
	 * Force out the log. This means any transactions that might have freed
	 * space before we take the AGF buffer lock are now on disk, and the
	 * volatile disk cache is flushed.  This must happen before
	 * xfs_alloc_read_agf() below so the ordering is not reversible.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		goto out_put_perag;
	agf = agbp->b_addr;

	/* Walk the by-size (cnt) btree so we see largest extents first. */
	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, pag, XFS_BTNUM_CNT);

	/*
	 * Look up the longest btree in the AGF and start with it.
	 */
	error = xfs_alloc_lookup_ge(cur, 0, be32_to_cpu(agf->agf_longest), &i);
	if (error)
		goto out_del_cursor;

	/*
	 * Loop until we are done with all extents that are large
	 * enough to be worth discarding.
	 */
	while (i) {
		xfs_agblock_t	fbno;
		xfs_extlen_t	flen;
		xfs_daddr_t	dbno;
		xfs_extlen_t	dlen;

		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
		if (error)
			goto out_del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto out_del_cursor;
		}
		ASSERT(flen <= be32_to_cpu(agf->agf_longest));

		/*
		 * use daddr format for all range/len calculations as that is
		 * the format the range/len variables are supplied in by
		 * userspace.
		 */
		dbno = XFS_AGB_TO_DADDR(mp, agno, fbno);
		dlen = XFS_FSB_TO_BB(mp, flen);

		/*
		 * Too small?  Give up.  Since we walk the size-ordered tree
		 * downwards, every remaining extent is at most this large.
		 */
		if (dlen < minlen) {
			trace_xfs_discard_toosmall(mp, agno, fbno, flen);
			goto out_del_cursor;
		}

		/*
		 * If the extent is entirely outside of the range we are
		 * supposed to discard skip it.  Do not bother to trim
		 * down partially overlapping ranges for now.
		 */
		if (dbno + dlen < start || dbno > end) {
			trace_xfs_discard_exclude(mp, agno, fbno, flen);
			goto next_extent;
		}

		/*
		 * If any blocks in the range are still busy, skip the
		 * discard and try again the next time.  This check must
		 * happen before issuing the discard, as busy extents may
		 * still be referenced by in-flight transactions.
		 */
		if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
			trace_xfs_discard_busy(mp, agno, fbno, flen);
			goto next_extent;
		}

		trace_xfs_discard_extent(mp, agno, fbno, flen);
		error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0);
		if (error)
			goto out_del_cursor;
		*blocks_trimmed += flen;

next_extent:
		/* Step to the next-smaller record in the by-size btree. */
		error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			goto out_del_cursor;

		/*
		 * Trimming a large filesystem can take a long time; allow
		 * the operation to be interrupted by a fatal signal.
		 */
		if (fatal_signal_pending(current)) {
			error = -ERESTARTSYS;
			goto out_del_cursor;
		}
	}

out_del_cursor:
	/* The cursor references agbp, so delete it before releasing. */
	xfs_btree_del_cursor(cur, error);
	xfs_buf_relse(agbp);
out_put_perag:
	xfs_perag_put(pag);
	return error;
}
| 140 | |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 141 | /* |
| 142 | * trim a range of the filesystem. |
| 143 | * |
| 144 | * Note: the parameters passed from userspace are byte ranges into the |
| 145 | * filesystem which does not match to the format we use for filesystem block |
| 146 | * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format |
| 147 | * is a linear address range. Hence we need to use DADDR based conversions and |
| 148 | * comparisons for determining the correct offset and regions to trim. |
| 149 | */ |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 150 | int |
| 151 | xfs_ioc_trim( |
| 152 | struct xfs_mount *mp, |
| 153 | struct fstrim_range __user *urange) |
| 154 | { |
Jie Liu | 2f42d61 | 2013-11-20 16:08:53 +0800 | [diff] [blame] | 155 | struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev); |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 156 | unsigned int granularity = q->limits.discard_granularity; |
| 157 | struct fstrim_range range; |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 158 | xfs_daddr_t start, end, minlen; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 159 | xfs_agnumber_t start_agno, end_agno, agno; |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 160 | uint64_t blocks_trimmed = 0; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 161 | int error, last_error = 0; |
| 162 | |
| 163 | if (!capable(CAP_SYS_ADMIN)) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 164 | return -EPERM; |
Lukas Czerner | be71514 | 2011-02-15 17:07:36 +0000 | [diff] [blame] | 165 | if (!blk_queue_discard(q)) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 166 | return -EOPNOTSUPP; |
Darrick J. Wong | ed79dac | 2019-03-22 18:10:22 -0700 | [diff] [blame] | 167 | |
| 168 | /* |
| 169 | * We haven't recovered the log, so we cannot use our bnobt-guided |
| 170 | * storage zapping commands. |
| 171 | */ |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 172 | if (xfs_has_norecovery(mp)) |
Darrick J. Wong | ed79dac | 2019-03-22 18:10:22 -0700 | [diff] [blame] | 173 | return -EROFS; |
| 174 | |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 175 | if (copy_from_user(&range, urange, sizeof(range))) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 176 | return -EFAULT; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 177 | |
Wang Shilong | 2bf9d26 | 2019-04-12 07:39:21 -0700 | [diff] [blame] | 178 | range.minlen = max_t(u64, granularity, range.minlen); |
| 179 | minlen = BTOBB(range.minlen); |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 180 | /* |
| 181 | * Truncating down the len isn't actually quite correct, but using |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 182 | * BBTOB would mean we trivially get overflows for values |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 183 | * of ULLONG_MAX or slightly lower. And ULLONG_MAX is the default |
| 184 | * used by the fstrim application. In the end it really doesn't |
| 185 | * matter as trimming blocks is an advisory interface. |
| 186 | */ |
Tomas Racek | a672e1b | 2012-08-14 10:35:04 +0200 | [diff] [blame] | 187 | if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || |
Darrick J. Wong | 5254885 | 2016-08-03 11:38:24 +1000 | [diff] [blame] | 188 | range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) || |
Jie Liu | 2f42d61 | 2013-11-20 16:08:53 +0800 | [diff] [blame] | 189 | range.len < mp->m_sb.sb_blocksize) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 190 | return -EINVAL; |
Tomas Racek | a672e1b | 2012-08-14 10:35:04 +0200 | [diff] [blame] | 191 | |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 192 | start = BTOBB(range.start); |
| 193 | end = start + BTOBBT(range.len) - 1; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 194 | |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 195 | if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1) |
| 196 | end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 197 | |
Dave Chinner | a66d636 | 2012-03-22 05:15:12 +0000 | [diff] [blame] | 198 | start_agno = xfs_daddr_to_agno(mp, start); |
| 199 | end_agno = xfs_daddr_to_agno(mp, end); |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 200 | |
| 201 | for (agno = start_agno; agno <= end_agno; agno++) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 202 | error = xfs_trim_extents(mp, agno, start, end, minlen, |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 203 | &blocks_trimmed); |
Lukas Czerner | 3c37819 | 2017-04-27 08:59:36 -0700 | [diff] [blame] | 204 | if (error) { |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 205 | last_error = error; |
Lukas Czerner | 3c37819 | 2017-04-27 08:59:36 -0700 | [diff] [blame] | 206 | if (error == -ERESTARTSYS) |
| 207 | break; |
| 208 | } |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 209 | } |
| 210 | |
| 211 | if (last_error) |
| 212 | return last_error; |
| 213 | |
| 214 | range.len = XFS_FSB_TO_B(mp, blocks_trimmed); |
| 215 | if (copy_to_user(urange, &range, sizeof(range))) |
Eric Sandeen | b474c7a | 2014-06-22 15:04:54 +1000 | [diff] [blame] | 216 | return -EFAULT; |
Christoph Hellwig | a46db60 | 2011-01-07 13:02:04 +0000 | [diff] [blame] | 217 | return 0; |
| 218 | } |