// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_alloc.h"
#include "xfs_refcount.h"

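/*
 * Deferred refcount btree updates are tracked in the log with a pair of
 * items: the CUI (refcount update intent) records the extents that still
 * need processing, and the CUD (refcount update done) retires the intent
 * once the update has been finished.  This file supplies the
 * xfs_defer_op_type hooks that drive that CUI/CUD pair.  Roughly (a sketch,
 * not a verbatim call chain), a caller queues work with something like
 *
 *	xfs_defer_add(dop, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
 *
 * and xfs_defer_finish() later invokes the routines below to log the
 * intent, apply each extent, and log the done item.
 */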
/*
 * This routine is called to allocate a "refcount update done"
 * log item.
 */
struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = xfs_cud_init(tp->t_mountp, cuip);
	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD.  Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails, to support the CUI/CUD lifecycle rules.
 */
int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	struct xfs_defer_ops		*dop,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

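	/*
	 * Apply this extent's refcount update to the refcount btree.  If the
	 * update cannot be completed within this transaction's reservation,
	 * the unprocessed remainder is handed back through new_fsb/new_len
	 * so that the caller can requeue it (see
	 * xfs_refcount_update_finish_item below).
	 */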
	error = xfs_refcount_finish_one(tp, dop, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
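/*
 * Sorting the work items keeps the deferral machinery processing updates in
 * ascending AG order, which matches the usual XFS convention of locking AGs
 * in increasing order to avoid deadlocks.
 */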
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Get a CUI. */
STATIC void *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	cuip = xfs_cui_init(tp->t_mountp, count);
	ASSERT(cuip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &cuip->cui_item);
	return cuip;
}

/* Set the phys extent flags for this refcount update. */
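/*
 * The intent type is stored directly in the log-format pe_flags field, so
 * the enum values double as the log encoding here; anything else is a
 * programming error and trips the assert below.
 */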
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*refc,
	enum xfs_refcount_intent_type	type)
{
	refc->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		refc->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_cui_log_item		*cuip = intent;
	struct xfs_refcount_intent	*refc;
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

/* Get a CUD so we can process all the deferred refcount updates. */
STATIC void *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_cud(tp, intent);
}

/* Process a deferred refcount update. */
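/*
 * Called for each intent on the deferred work list.  If the btree update
 * could only be partially completed, the remaining range is stashed back in
 * the intent and -EAGAIN tells the deferral code to requeue it; otherwise
 * the intent is freed.
 */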
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, done_item, dop,
			refc->ri_type,
			refc->ri_startblock,
			refc->ri_blockcount,
			&new_fsb, &new_aglen,
			(struct xfs_btree_cur **)state);
	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Clean up after processing deferred refcounts. */
STATIC void
xfs_refcount_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	void				*intent)
{
	xfs_cui_release(intent);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

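/*
 * Glue the routines above into the deferred-operations framework: the defer
 * code sorts queued intents, creates and logs the CUI, and at finish time
 * creates the CUD and applies each extent via ->finish_item.
 */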
static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_REFCOUNT,
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_refcount_update_diff_items,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.log_item	= xfs_refcount_update_log_item,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup	= xfs_refcount_update_finish_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Register the deferred op type. */
void
xfs_refcount_update_init_defer_op(void)
{
	xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
}