blob: 63fec11ef2ce3fe8c37ff0f76a923444ee0df7d6 [file] [log] [blame]
Thomas Gleixner7336d0e2019-05-31 01:09:56 -07001// SPDX-License-Identifier: GPL-2.0-only
David Teiglandb3b94fa2006-01-16 16:50:04 +00002/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Steven Whitehouse3a8a9a12006-05-18 15:09:15 -04004 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00005 */
6
Joe Perchesd77d1b52014-03-06 12:10:45 -08007#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
David Teiglandb3b94fa2006-01-16 16:50:04 +00009#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
Steven Whitehoused0dc80d2006-03-29 14:36:49 -050014#include <linux/kallsyms.h>
Steven Whitehousef057f6c2009-01-12 10:43:39 +000015#include <linux/gfs2_ondisk.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000016
17#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050018#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000019#include "glock.h"
Steven Whitehouse767f4332012-12-14 12:52:14 +000020#include "inode.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000021#include "log.h"
22#include "lops.h"
23#include "meta_io.h"
24#include "trans.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050025#include "util.h"
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -050026#include "trace_gfs2.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000027
/*
 * Dump a transaction's parameters and buffer/revoke counters to the kernel
 * log for debugging (used when a second transaction is started while one
 * is already active, and when commit-time sanity checks fail).
 */
static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
{
	/* %pSR resolves tr_ip (the caller address saved at trans_begin)
	   to a symbol name. */
	fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
		test_bit(TR_TOUCHED, &tr->tr_flags));
	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke);
}
39
/**
 * __gfs2_trans_begin - initialize a transaction and reserve journal space
 * @tr: caller-provided transaction (may be on-stack or slab-allocated)
 * @sdp: the filesystem
 * @blocks: number of (meta)data blocks this transaction may journal
 * @revokes: number of revokes this transaction may issue
 * @ip: caller address, recorded in tr_ip for gfs2_print_trans() diagnostics
 *
 * Returns: 0 on success, -EROFS if the journal is not live, -EINVAL if the
 * computed reservation would not fit in the journal.  On success the
 * transaction is installed as current->journal_info and sd_log_flush_lock
 * is held for read; both are undone by gfs2_trans_end().
 */
int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
		       unsigned int blocks, unsigned int revokes,
		       unsigned long ip)
{
	unsigned int extra_revokes;

	/* Nested transactions are a bug: dump the active one and die. */
	if (current->journal_info) {
		gfs2_print_trans(sdp, current->journal_info);
		BUG();
	}
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr->tr_ip = ip;
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	if (blocks) {
		/*
		 * The reserved blocks are either used for data or metadata.
		 * We can have mixed data and metadata, each with its own log
		 * descriptor block; see calc_reserved().
		 */
		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
	}
	INIT_LIST_HEAD(&tr->tr_databuf);
	INIT_LIST_HEAD(&tr->tr_buf);
	INIT_LIST_HEAD(&tr->tr_list);
	INIT_LIST_HEAD(&tr->tr_ail1_list);
	INIT_LIST_HEAD(&tr->tr_ail2_list);

	/* A reservation larger than the whole journal can never succeed. */
	if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	sb_start_intwrite(sdp->sd_vfs);

	/*
	 * Try the reservations under sd_log_flush_lock to prevent log flushes
	 * from creating inconsistencies between the number of allocated and
	 * reserved revokes. If that fails, do a full-block allocation outside
	 * of the lock to avoid stalling log flushes. Then, allot the
	 * appropriate number of blocks to revokes, use as many revokes locally
	 * as needed, and "release" the surplus into the revokes pool.
	 */

	down_read(&sdp->sd_log_flush_lock);
	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
		goto reserved;
	/* Blocking reservation must not be done under the flush lock. */
	up_read(&sdp->sd_log_flush_lock);
	gfs2_log_reserve(sdp, tr, &extra_revokes);
	down_read(&sdp->sd_log_flush_lock);

reserved:
	/* Return revokes we reserved but don't need to the shared pool. */
	gfs2_log_release_revokes(sdp, extra_revokes);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		/* Journal died while we were reserving: undo everything. */
		gfs2_log_release_revokes(sdp, tr->tr_revokes);
		up_read(&sdp->sd_log_flush_lock);
		gfs2_log_release(sdp, tr->tr_reserved);
		sb_end_intwrite(sdp->sd_vfs);
		return -EROFS;
	}

	current->journal_info = tr;

	return 0;
}
David Teiglandb3b94fa2006-01-16 16:50:04 +0000108
Andreas Gruenbacherc968f572021-01-29 16:45:33 +0100109int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
110 unsigned int revokes)
111{
112 struct gfs2_trans *tr;
113 int error;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000114
Andreas Gruenbacherc968f572021-01-29 16:45:33 +0100115 tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
116 if (!tr)
117 return -ENOMEM;
118 error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
119 if (error)
120 kmem_cache_free(gfs2_trans_cachep, tr);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000121 return error;
122}
123
/**
 * gfs2_trans_end - end the transaction started by __gfs2_trans_begin
 * @sdp: the filesystem
 *
 * Returns unused revoke/block reservations, commits journaled buffers,
 * drops sd_log_flush_lock (taken for read in __gfs2_trans_begin), and
 * frees the transaction unless it is on-stack (TR_ONSTACK) or was
 * attached to the log (TR_ATTACHED).
 */
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;
	s64 nbuf;

	current->journal_info = NULL;

	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
		/* Nothing was journaled: give the whole reservation back. */
		gfs2_log_release_revokes(sdp, tr->tr_revokes);
		up_read(&sdp->sd_log_flush_lock);
		gfs2_log_release(sdp, tr->tr_reserved);
		if (!test_bit(TR_ONSTACK, &tr->tr_flags))
			gfs2_trans_free(sdp, tr);
		sb_end_intwrite(sdp->sd_vfs);
		return;
	}

	/* Return only the revokes that were reserved but never issued. */
	gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);

	/* Net buffers added: new minus removed, meta and data combined. */
	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
	nbuf -= tr->tr_num_buf_rm;
	nbuf -= tr->tr_num_databuf_rm;

	/* Using more buffers or revokes than reserved corrupts the log;
	   withdraw and dump the transaction for diagnosis. */
	if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
	    gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
		gfs2_print_trans(sdp, tr);

	gfs2_log_commit(sdp, tr);
	if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
	    !test_bit(TR_ATTACHED, &tr->tr_flags))
		gfs2_trans_free(sdp, tr);
	up_read(&sdp->sd_log_flush_lock);

	/* Synchronous mounts flush the log at the end of every transaction. */
	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_TRANS_END);
	sb_end_intwrite(sdp->sd_vfs);
}
162
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000163static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
Bob Petersoncbbe76c2018-11-16 14:18:32 -0600164 struct buffer_head *bh)
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000165{
166 struct gfs2_bufdata *bd;
167
168 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
169 bd->bd_bh = bh;
170 bd->bd_gl = gl;
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000171 INIT_LIST_HEAD(&bd->bd_list);
Bob Peterson1a5a2cf2021-02-25 11:11:09 -0500172 INIT_LIST_HEAD(&bd->bd_ail_st_list);
173 INIT_LIST_HEAD(&bd->bd_ail_gl_list);
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000174 bh->b_private = bd;
175 return bd;
176}
177
/**
 * gfs2_trans_add_data - Add a databuf to the transaction.
 * @gl: The inode glock associated with the buffer
 * @bh: The buffer to add
 *
 * This is used in journaled data mode.
 * We need to journal the data block in the same way as metadata in
 * the functions above. The difference is that here we have a tag
 * which is two __be64's being the block number (as per meta data)
 * and a flag which says whether the data block needs escaping or
 * not. This means we need a new log entry for each 251 or so data
 * blocks, which isn't an enormous overhead but twice as much as
 * for normal metadata blocks.
 */
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	if (buffer_pinned(bh)) {
		/* Already in the log: just mark the transaction touched. */
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * Allocating a bufdata must be done without the buffer or
		 * log lock held; re-check b_private afterwards since another
		 * thread may have attached one in the meantime.
		 */
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh);
		else
			bd = bh->b_private;
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	/* Only queue the buffer once; bd_list non-empty means it's queued. */
	if (list_empty(&bd->bd_list)) {
		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		list_add_tail(&bd->bd_list, &tr->tr_databuf);
	}
	gfs2_log_unlock(sdp);
out:
	unlock_buffer(bh);
}
228
Steven Whitehouse350a9b02012-12-14 12:36:02 +0000229void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
230{
Steven Whitehouse767f4332012-12-14 12:52:14 +0000231
Bob Peterson15562c42015-03-16 11:52:05 -0500232 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehouse767f4332012-12-14 12:52:14 +0000233 struct gfs2_bufdata *bd;
Bob Peterson192738b2017-01-25 12:57:42 -0500234 struct gfs2_meta_header *mh;
Bob Petersonaacee722017-01-30 11:51:21 -0500235 struct gfs2_trans *tr = current->journal_info;
Bob Peterson192738b2017-01-25 12:57:42 -0500236 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
Steven Whitehouse767f4332012-12-14 12:52:14 +0000237
238 lock_buffer(bh);
Bob Petersonaacee722017-01-30 11:51:21 -0500239 if (buffer_pinned(bh)) {
240 set_bit(TR_TOUCHED, &tr->tr_flags);
241 goto out;
242 }
Steven Whitehouse767f4332012-12-14 12:52:14 +0000243 gfs2_log_lock(sdp);
244 bd = bh->b_private;
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000245 if (bd == NULL) {
Steven Whitehouse767f4332012-12-14 12:52:14 +0000246 gfs2_log_unlock(sdp);
247 unlock_buffer(bh);
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000248 lock_page(bh->b_page);
249 if (bh->b_private == NULL)
Bob Petersoncbbe76c2018-11-16 14:18:32 -0600250 bd = gfs2_alloc_bufdata(gl, bh);
Bob Peterson491e94f2015-10-01 11:47:31 -0500251 else
252 bd = bh->b_private;
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000253 unlock_page(bh->b_page);
Steven Whitehouse767f4332012-12-14 12:52:14 +0000254 lock_buffer(bh);
255 gfs2_log_lock(sdp);
256 }
Steven Whitehousec76c4d92012-12-14 17:54:21 +0000257 gfs2_assert(sdp, bd->bd_gl == gl);
Bob Peterson192738b2017-01-25 12:57:42 -0500258 set_bit(TR_TOUCHED, &tr->tr_flags);
259 if (!list_empty(&bd->bd_list))
260 goto out_unlock;
261 set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
262 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
263 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
264 if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
Bob Petersone54c78a2018-10-03 08:47:36 -0500265 fs_err(sdp, "Attempting to add uninitialised block to "
266 "journal (inplace block=%lld)\n",
Bob Peterson192738b2017-01-25 12:57:42 -0500267 (unsigned long long)bd->bd_bh->b_blocknr);
268 BUG();
269 }
270 if (unlikely(state == SFS_FROZEN)) {
Bob Petersone54c78a2018-10-03 08:47:36 -0500271 fs_info(sdp, "GFS2:adding buf while frozen\n");
Bob Peterson192738b2017-01-25 12:57:42 -0500272 gfs2_assert_withdraw(sdp, 0);
273 }
Bob Peterson2ca0c2f2019-11-13 13:58:30 -0600274 if (unlikely(gfs2_withdrawn(sdp))) {
275 fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
276 (unsigned long long)bd->bd_bh->b_blocknr);
277 }
Bob Peterson192738b2017-01-25 12:57:42 -0500278 gfs2_pin(sdp, bd->bd_bh);
279 mh->__pad0 = cpu_to_be64(0);
280 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
281 list_add(&bd->bd_list, &tr->tr_buf);
282 tr->tr_num_buf_new++;
283out_unlock:
Steven Whitehouse767f4332012-12-14 12:52:14 +0000284 gfs2_log_unlock(sdp);
Bob Petersonaacee722017-01-30 11:51:21 -0500285out:
Steven Whitehouse767f4332012-12-14 12:52:14 +0000286 unlock_buffer(bh);
Steven Whitehouse350a9b02012-12-14 12:36:02 +0000287}
288
/**
 * gfs2_trans_add_revoke - issue a revoke within the current transaction
 * @sdp: the filesystem
 * @bd: the bufdata for the block being revoked
 *
 * @bd must not be queued on any list; gfs2_add_revoke() takes it over
 * (presumably onto the log's revoke list — see gfs2_add_revoke()).
 * Marks the transaction touched and counts the revoke against the
 * transaction's reservation (checked at gfs2_trans_end()).
 */
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!list_empty(&bd->bd_list));
	gfs2_add_revoke(sdp, bd);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	tr->tr_num_revoke++;
}
298
/**
 * gfs2_trans_remove_revoke - cancel pending revokes for a block range
 * @sdp: the filesystem
 * @blkno: first block number of the range
 * @len: number of blocks in the range
 *
 * Walks the log's pending revoke list under the log lock and removes
 * any revoke whose block falls in [@blkno, @blkno + @len), freeing the
 * bufdata and returning its slot to the revoke pool.  Stops once @len
 * revokes have been removed.
 */
void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
{
	struct gfs2_bufdata *bd, *tmp;
	unsigned int n = len;

	gfs2_log_lock(sdp);
	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
			list_del_init(&bd->bd_list);
			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
			sdp->sd_log_num_revoke--;
			if (bd->bd_gl)
				gfs2_glock_remove_revoke(bd->bd_gl);
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			/* Each removed revoke frees one reserved slot. */
			gfs2_log_release_revokes(sdp, 1);
			/* At most @len blocks can match; stop early. */
			if (--n == 0)
				break;
		}
	}
	gfs2_log_unlock(sdp);
}
320
Bob Petersonb839dad2019-04-17 12:04:27 -0600321void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
322{
323 if (tr == NULL)
324 return;
325
326 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
327 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
328 gfs2_assert_warn(sdp, list_empty(&tr->tr_databuf));
329 gfs2_assert_warn(sdp, list_empty(&tr->tr_buf));
330 kmem_cache_free(gfs2_trans_cachep, tr);
331}