blob: 010c319caade4c7b388ccdc3247bf3cec4a82e35 [file] [log] [blame]
Thomas Gleixner7336d0e2019-05-31 01:09:56 -07001// SPDX-License-Identifier: GPL-2.0-only
David Teiglandb3b94fa2006-01-16 16:50:04 +00002/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersonda6dd402007-12-11 18:49:21 -06004 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00005 */
6
7#include <linux/sched.h>
8#include <linux/slab.h>
9#include <linux/spinlock.h>
10#include <linux/completion.h>
11#include <linux/buffer_head.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050012#include <linux/gfs2_ondisk.h>
Steven Whitehouse71b86f52006-03-28 14:14:04 -050013#include <linux/crc32.h>
Bob Petersonc1696fb2018-01-17 00:01:33 +010014#include <linux/crc32c.h>
Steven Whitehousea25311c2006-11-23 11:06:35 -050015#include <linux/delay.h>
Steven Whitehouseec69b182007-11-09 10:01:41 +000016#include <linux/kthread.h>
17#include <linux/freezer.h>
Steven Whitehouse254db572008-09-26 10:23:22 +010018#include <linux/bio.h>
Steven Whitehouse885bcec2014-02-03 09:57:29 +000019#include <linux/blkdev.h>
Steven Whitehouse4667a0e2011-04-18 14:18:09 +010020#include <linux/writeback.h>
Bob Peterson4a36d082012-02-14 14:49:57 -050021#include <linux/list_sort.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000022
23#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050024#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000025#include "bmap.h"
26#include "glock.h"
27#include "log.h"
28#include "lops.h"
29#include "meta_io.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050030#include "util.h"
Steven Whitehouse71b86f52006-03-28 14:14:04 -050031#include "dir.h"
Steven Whitehouse63997772009-06-12 08:49:20 +010032#include "trace_gfs2.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000033
Bob Petersonfeed98a2019-11-14 09:48:26 -050034static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
35
David Teiglandb3b94fa2006-01-16 16:50:04 +000036/**
37 * gfs2_struct2blk - compute stuff
38 * @sdp: the filesystem
39 * @nstruct: the number of structures
David Teiglandb3b94fa2006-01-16 16:50:04 +000040 *
41 * Compute the number of log descriptor blocks needed to hold a certain number
42 * of structures of a certain size.
43 *
44 * Returns: the number of blocks needed (minimum is always 1)
45 */
46
Bob Peterson2e9eeaa2019-12-13 08:10:51 -060047unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
David Teiglandb3b94fa2006-01-16 16:50:04 +000048{
49 unsigned int blks;
50 unsigned int first, second;
51
52 blks = 1;
Bob Peterson2e9eeaa2019-12-13 08:10:51 -060053 first = sdp->sd_ldptrs;
David Teiglandb3b94fa2006-01-16 16:50:04 +000054
55 if (nstruct > first) {
Bob Peterson2e9eeaa2019-12-13 08:10:51 -060056 second = sdp->sd_inptrs;
Steven Whitehouse5c676f62006-02-27 17:23:27 -050057 blks += DIV_ROUND_UP(nstruct - first, second);
David Teiglandb3b94fa2006-01-16 16:50:04 +000058 }
59
60 return blks;
61}
62
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040063/**
Steven Whitehouse1e1a3d02007-08-27 09:45:26 +010064 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
65 * @mapping: The associated mapping (maybe NULL)
66 * @bd: The gfs2_bufdata to remove
67 *
Steven Whitehousec618e872011-03-14 12:40:29 +000068 * The ail lock _must_ be held when calling this function
Steven Whitehouse1e1a3d02007-08-27 09:45:26 +010069 *
70 */
71
static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;			/* detach from its transaction */
	list_del_init(&bd->bd_ail_st_list);	/* off the ail1/ail2 state list */
	list_del_init(&bd->bd_ail_gl_list);	/* off the glock's ail list */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);			/* drop the bh reference the ail held */
}
80
81/**
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040082 * gfs2_ail1_start_one - Start I/O on a part of the AIL
83 * @sdp: the filesystem
Steven Whitehouse4667a0e2011-04-18 14:18:09 +010084 * @wbc: The writeback control structure
85 * @ai: The ail structure
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040086 *
87 */
88
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			/* Write already completed: promote to ail2 */
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			/* Not busy and not uptodate => I/O error; record it
			 * once (cmpxchg) and schedule a delayed withdraw */
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		/* Once withdrawn, just drop entries instead of writing them */
		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		/* Only start writeback once per glock in this pass */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* Drop the ail lock around writeback (see __releases above) */
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (ret || wbc->nr_to_write <= 0)
			break;
		/* Lock was dropped, list may have changed: tell the caller
		 * to restart its scan */
		return -EBUSY;
	}

	return ret;
}
141
142
143/**
144 * gfs2_ail1_flush - start writeback of some ail1 entries
145 * @sdp: The super block
146 * @wbc: The writeback control structure
147 *
148 * Writes back some ail1 entries, according to the limits in the
149 * writeback control structure
150 */
151
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret = 0;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	/* Plug so the writeback below can be merged/batched by the block layer */
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	/* Oldest transactions first (reverse list order) */
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr);
		if (ret) {
			/* -EBUSY: the ail lock was dropped and the list may
			 * have changed under us, so rescan from the start */
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	/* Any real error (not -EBUSY, which loops above) withdraws the fs */
	if (ret)
		gfs2_withdraw(sdp);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
179
180/**
181 * gfs2_ail1_start - start writeback of all ail1 entries
182 * @sdp: The superblock
183 */
184
185static void gfs2_ail1_start(struct gfs2_sbd *sdp)
186{
187 struct writeback_control wbc = {
188 .sync_mode = WB_SYNC_NONE,
189 .nr_to_write = LONG_MAX,
190 .range_start = 0,
191 .range_end = LLONG_MAX,
192 };
193
194 return gfs2_ail1_flush(sdp, &wbc);
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400195}
196
197/**
198 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
199 * @sdp: the filesystem
Bob Peterson5e4c7632019-02-21 14:28:07 -0700200 * @tr: the transaction
201 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400202 *
203 */
204
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh))
			continue;
		/* Record the first I/O error exactly once via cmpxchg */
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		/* Otherwise the buffer is written back: move it to ail2 */
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}
245
Steven Whitehouse4667a0e2011-04-18 14:18:09 +0100246/**
247 * gfs2_ail1_empty - Try to empty the ail1 lists
248 * @sdp: The superblock
Bob Peterson5e4c7632019-02-21 14:28:07 -0700249 * @max_revokes: If non-zero, add revokes where appropriate
Steven Whitehouse4667a0e2011-04-18 14:18:09 +0100250 *
251 * Tries to empty the ail1 lists, starting with the oldest first
252 */
David Teiglandb3b94fa2006-01-16 16:50:04 +0000253
static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;	/* true while we're still on the oldest run */
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	/* Oldest transactions first (reverse list order) */
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &max_revokes);
		/* Only retire a transaction to ail2 if all older ones (seen
		 * so far in this scan) were also fully emptied */
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);	/* 1 if ail1 fully drained */
	spin_unlock(&sdp->sd_ail_lock);

	/* A delayed withdraw may have been flagged while emptying */
	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}
278
/* Wait for I/O on the first locked (in-flight) ail1 buffer found, if any.
 * Returns after waiting on at most one buffer; callers loop as needed. */
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			/* Pin the bh so it can't vanish once we drop the
			 * ail lock to sleep on it */
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400300
301/**
Bob Peterson2ca0c2f2019-11-13 13:58:30 -0600302 * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400303 */
304
static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	/* Drain the given ail list (ail1 or ail2) of transaction tr,
	 * removing and releasing each bufdata in turn */
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);	/* drops ref on bd->bd_bh */
	}
}
317
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);	/* tail range wraps the journal */
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		/* tr is reclaimable if its first block lies in
		 * [old_tail, new_tail), accounting for journal wrap-around */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		/* Both ail lists must be empty before freeing the trans */
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
343
344/**
Benjamin Marzinski24972552014-05-01 22:26:55 -0500345 * gfs2_log_release - Release a given number of log blocks
346 * @sdp: The GFS2 superblock
347 * @blks: The number of blocks
348 *
349 */
350
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	/* Return the blocks to the free pool ... */
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* ... which must never exceed the journal's total size */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	/* Pairs with the down_read() taken in gfs2_log_reserve() */
	up_read(&sdp->sd_log_flush_lock);
}
360
361/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000362 * gfs2_log_reserve - Make a log reservation
363 * @sdp: The GFS2 superblock
364 * @blks: The number of blocks to reserve
365 *
Steven Whitehouse89918642007-06-01 15:19:33 +0100366 * Note that we never give out the last few blocks of the journal. Thats
Robert Peterson2332c442007-06-18 14:50:20 -0500367 * due to the fact that there is a small number of header blocks
Steven Whitehouseb0041572006-11-23 10:51:34 -0500368 * associated with each log flush. The exact number can't be known until
369 * flush time, so we ensure that we have just enough free blocks at all
370 * times to avoid running out during a log flush.
371 *
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -0500372 * We no longer flush the log here, instead we wake up logd to do that
373 * for us. To avoid the thundering herd and to ensure that we deal fairly
374 * with queued waiters, we use an exclusive wait. This means that when we
375 * get woken with enough journal space to get our reservation, we need to
376 * wake the next waiter on the list.
377 *
David Teiglandb3b94fa2006-01-16 16:50:04 +0000378 * Returns: errno
379 */
380
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Blocks held back so a log flush can always complete */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	/* Zero or over-sized reservations are caller bugs */
	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Exclusive wait: wake logd to flush, sleep until enough
		 * journal space is available for our reservation */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Claim the blocks atomically; if someone raced us, undo and retry */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	/* Held until gfs2_log_release(); blocks a concurrent log flush */
	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		/* Journal is dead: give the blocks back and fail read-only */
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
434
David Teiglandb3b94fa2006-01-16 16:50:04 +0000435/**
436 * log_distance - Compute distance between two journal blocks
437 * @sdp: The GFS2 superblock
438 * @newer: The most recent journal block of the pair
439 * @older: The older journal block of the pair
440 *
441 * Compute the distance (in the journal direction) between two
442 * blocks in the journal
443 *
444 * Returns: the distance in blocks
445 */
446
Steven Whitehousefaa31ce2006-09-13 11:13:27 -0400447static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
David Teiglandb3b94fa2006-01-16 16:50:04 +0000448 unsigned int older)
449{
450 int dist;
451
452 dist = newer - older;
453 if (dist < 0)
454 dist += sdp->sd_jdesc->jd_blocks;
455
456 return dist;
457}
458
Robert Peterson2332c442007-06-18 14:50:20 -0500459/**
460 * calc_reserved - Calculate the number of blocks to reserve when
461 * refunding a transaction's unused buffers.
462 * @sdp: The GFS2 superblock
463 *
464 * This is complex. We need to reserve room for all our currently used
465 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
466 * all our journaled data buffers for journaled files (e.g. files in the
467 * meta_fs like rindex, or files for which chattr +j was done.)
468 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
469 * will count it as free space (sd_log_blks_free) and corruption will follow.
470 *
471 * We can have metadata bufs and jdata bufs in the same journal. So each
472 * type gets its own log header, for which we need to reserve a block.
473 * In fact, each type has the potential for needing more than one header
474 * in cases where we have more buffers than will fit on a journal page.
475 * Metadata journal entries take up half the space of journaled buffer entries.
476 * Thus, metadata entries have buf_limit (502) and journaled buffers have
477 * databuf_limit (251) before they cause a wrap around.
478 *
479 * Also, we need to reserve blocks for revoke journal entries and one for an
480 * overall header for the lot.
481 *
482 * Returns: the number of blocks reserved
483 */
484static unsigned int calc_reserved(struct gfs2_sbd *sdp)
485{
486 unsigned int reserved = 0;
Steven Whitehouse022ef4f2014-02-21 21:55:33 +0000487 unsigned int mbuf;
488 unsigned int dbuf;
489 struct gfs2_trans *tr = sdp->sd_log_tr;
Robert Peterson2332c442007-06-18 14:50:20 -0500490
Steven Whitehouse022ef4f2014-02-21 21:55:33 +0000491 if (tr) {
492 mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
493 dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
494 reserved = mbuf + dbuf;
495 /* Account for header blocks */
496 reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
497 reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
498 }
Robert Peterson2332c442007-06-18 14:50:20 -0500499
Andreas Gruenbacher5d439752020-01-09 13:54:36 +0100500 if (sdp->sd_log_committed_revoke > 0)
501 reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
Robert Peterson2332c442007-06-18 14:50:20 -0500502 /* One for the overall header */
503 if (reserved)
504 reserved++;
505 return reserved;
506}
507
/* Return the current journal tail: the first block of the oldest ail1
 * transaction, or the log head if no transactions are outstanding. */
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		/* Oldest transaction is at the tail of the ail1 list */
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
527
/* Advance the journal tail to new_tail, reclaiming the freed distance:
 * empty the corresponding ail2 transactions and credit the blocks back. */
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	/* Free count must never exceed the journal size */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}
541
David Teiglandb3b94fa2006-01-16 16:50:04 +0000542
/* Sleep until all in-flight log I/O (sd_log_in_flight) has completed. */
void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check after arming the waitqueue to avoid
			 * missing a wakeup */
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}
557
Steven Whitehouse45138992013-01-28 09:30:07 +0000558static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
Bob Peterson4a36d082012-02-14 14:49:57 -0500559{
Steven Whitehouse45138992013-01-28 09:30:07 +0000560 struct gfs2_inode *ipa, *ipb;
Bob Peterson4a36d082012-02-14 14:49:57 -0500561
Steven Whitehouse45138992013-01-28 09:30:07 +0000562 ipa = list_entry(a, struct gfs2_inode, i_ordered);
563 ipb = list_entry(b, struct gfs2_inode, i_ordered);
Bob Peterson4a36d082012-02-14 14:49:57 -0500564
Steven Whitehouse45138992013-01-28 09:30:07 +0000565 if (ipa->i_no_addr < ipb->i_no_addr)
Bob Peterson4a36d082012-02-14 14:49:57 -0500566 return -1;
Steven Whitehouse45138992013-01-28 09:30:07 +0000567 if (ipa->i_no_addr > ipb->i_no_addr)
Bob Peterson4a36d082012-02-14 14:49:57 -0500568 return 1;
569 return 0;
570}
571
/* Start writeback for all ordered-write inodes, in disk-address order. */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	/* Sort by i_no_addr so writeback proceeds in disk order */
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			/* Nothing cached: drop it from the ordered list */
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		/* Park on a private list so the lock can be dropped while
		 * writeback is started */
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	/* Put the written inodes back for gfs2_ordered_wait() */
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
594
/* Wait for completion of the writeback started by gfs2_ordered_write(),
 * emptying the ordered list as each inode's data lands on disk. */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* Drop the lock while sleeping on the inode's pages */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
612
/* Remove @ip from the superblock's ordered-write list, if it is on it. */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	/* GIF_ORDERED tracks list membership; only unlink if it was set */
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
622
/* Turn @bd into a revoke entry: detach it from its buffer_head and ail
 * lists and queue it on the superblock's revoke list. */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;	/* remember the block being revoked */
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	sdp->sd_log_num_revoke++;
	/* First revoke on this glock pins it (released in
	 * gfs2_glock_remove_revoke) */
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
638
Bob Petersonfe5e7ba2019-11-14 09:49:11 -0500639void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
640{
641 if (atomic_dec_return(&gl->gl_revokes) == 0) {
642 clear_bit(GLF_LFLUSH, &gl->gl_flags);
643 gfs2_glock_queue_put(gl);
644 }
645}
646
/**
 * gfs2_write_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away. This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back. This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_log_lock(sdp);
	/* Each additional revoke block after the first holds its entries
	 * behind a smaller gfs2_meta_header, so grow the capacity block by
	 * block until it covers everything already queued ... */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	/* ... then keep only the leftover space in the last block. */
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	/* Scan the ail1 lists and queue up to max_revokes extra revokes for
	 * blocks that have already been written back. */
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	/* Nothing got queued after all: return the provisional reservation. */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
684
David Teiglandb3b94fa2006-01-16 16:50:04 +0000685/**
Bob Peterson7c70b892019-03-25 09:34:19 -0600686 * gfs2_write_log_header - Write a journal log header buffer at lblock
Bob Peterson588bff92017-12-18 12:48:29 -0600687 * @sdp: The GFS2 superblock
Bob Petersonc1696fb2018-01-17 00:01:33 +0100688 * @jd: journal descriptor of the journal to which we are writing
Bob Peterson588bff92017-12-18 12:48:29 -0600689 * @seq: sequence number
690 * @tail: tail of the log
Bob Peterson7c70b892019-03-25 09:34:19 -0600691 * @lblock: value for lh_blkno (block number relative to start of journal)
Bob Petersonc1696fb2018-01-17 00:01:33 +0100692 * @flags: log header flags GFS2_LOG_HEAD_*
Bob Peterson588bff92017-12-18 12:48:29 -0600693 * @op_flags: flags to pass to the bio
694 *
695 * Returns: the initialized log buffer descriptor
696 */
697
Bob Petersonc1696fb2018-01-17 00:01:33 +0100698void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
Bob Peterson7c70b892019-03-25 09:34:19 -0600699 u64 seq, u32 tail, u32 lblock, u32 flags,
700 int op_flags)
Bob Peterson588bff92017-12-18 12:48:29 -0600701{
702 struct gfs2_log_header *lh;
Bob Petersonc1696fb2018-01-17 00:01:33 +0100703 u32 hash, crc;
Bob Petersonade48082019-11-20 08:53:14 -0500704 struct page *page;
Bob Petersonc1696fb2018-01-17 00:01:33 +0100705 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
706 struct timespec64 tv;
707 struct super_block *sb = sdp->sd_vfs;
Bob Peterson7c70b892019-03-25 09:34:19 -0600708 u64 dblock;
Bob Peterson588bff92017-12-18 12:48:29 -0600709
Bob Petersonade48082019-11-20 08:53:14 -0500710 if (gfs2_withdrawn(sdp))
711 goto out;
712
713 page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
Bob Peterson588bff92017-12-18 12:48:29 -0600714 lh = page_address(page);
715 clear_page(lh);
716
717 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
718 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
719 lh->lh_header.__pad0 = cpu_to_be64(0);
720 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
721 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
722 lh->lh_sequence = cpu_to_be64(seq);
723 lh->lh_flags = cpu_to_be32(flags);
724 lh->lh_tail = cpu_to_be32(tail);
Bob Peterson7c70b892019-03-25 09:34:19 -0600725 lh->lh_blkno = cpu_to_be32(lblock);
Bob Petersonc1696fb2018-01-17 00:01:33 +0100726 hash = ~crc32(~0, lh, LH_V1_SIZE);
Bob Peterson588bff92017-12-18 12:48:29 -0600727 lh->lh_hash = cpu_to_be32(hash);
728
Arnd Bergmannee9c7f92018-06-20 15:15:24 -0500729 ktime_get_coarse_real_ts64(&tv);
Bob Petersonc1696fb2018-01-17 00:01:33 +0100730 lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
731 lh->lh_sec = cpu_to_be64(tv.tv_sec);
Bob Peterson7c70b892019-03-25 09:34:19 -0600732 if (!list_empty(&jd->extent_list))
Andreas Gruenbacher19ebc052019-08-28 22:21:34 +0200733 dblock = gfs2_log_bmap(jd, lblock);
Bob Peterson7c70b892019-03-25 09:34:19 -0600734 else {
735 int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
736 if (gfs2_assert_withdraw(sdp, ret == 0))
737 return;
738 }
739 lh->lh_addr = cpu_to_be64(dblock);
Bob Petersonc1696fb2018-01-17 00:01:33 +0100740 lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
741
742 /* We may only write local statfs, quota, etc., when writing to our
743 own journal. The values are left 0 when recovering a journal
744 different from our own. */
745 if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
746 lh->lh_statfs_addr =
747 cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
748 lh->lh_quota_addr =
749 cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
750
751 spin_lock(&sdp->sd_statfs_spin);
752 lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
753 lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
754 lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
755 spin_unlock(&sdp->sd_statfs_spin);
756 }
757
758 BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
759
760 crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
761 sb->s_blocksize - LH_V1_SIZE - 4);
762 lh->lh_crc = cpu_to_be32(crc);
763
Bob Peterson7c70b892019-03-25 09:34:19 -0600764 gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
Abhi Dasf4686c22019-05-02 14:17:40 -0500765 gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
Bob Petersonade48082019-11-20 08:53:14 -0500766out:
Bob Peterson588bff92017-12-18 12:48:29 -0600767 log_flush_wait(sdp);
768}
769
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 *
 * Writes a log header for our own journal at the current flush head,
 * picking the bio op flags according to whether the underlying device
 * honors barriers (SDF_NOBARRIERS), and advances the flush head.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		/* No barriers: wait for ordered and log data to hit the disk
		 * ourselves, then drop the PREFLUSH/FUA flags. */
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);

	/* The header records the new tail; release the freed journal space. */
	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
800
/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 *
 * Empties and frees every transaction still sitting on the ail lists.
 * Only called from gfs2_log_flush() once the filesystem has been
 * withdrawn, so none of this metadata will reach the media.
 */
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}
834
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 * Commits the current incore transaction to the journal: ordered data is
 * written first, then the log ops, then a log header.  For non-NORMAL
 * flushes (shutdown/freeze) the ail lists are also drained so the tail
 * catches up with the head.  Bails out early at several points if the
 * filesystem has been withdrawn.
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	/* Detach the current transaction; new commits start a fresh one. */
	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely (state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	/* A frozen fs must not have anything left to write. */
	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke);

	/* Ordered data must be on disk before the log describes it. */
	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out;
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		/* Nothing new to write, but the tail moved: write a header
		 * anyway so journal space can be reclaimed. */
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	/* Hand the transaction over to the ail1 list; otherwise it is
	 * freed below. */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		/* Shutdown/freeze: push the ail until the log is idle. */
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp, 0))
					break;
			}
			if (gfs2_withdrawn(sdp))
				goto out;
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out:
	if (gfs2_withdrawn(sdp)) {
		ail_drain(sdp); /* frees all transactions */
		tr = NULL;
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}
951
Steven Whitehoused69a3c62014-02-21 15:22:35 +0000952/**
953 * gfs2_merge_trans - Merge a new transaction into a cached transaction
954 * @old: Original transaction to be expanded
955 * @new: New transaction to be merged
956 */
957
958static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
959{
Bob Peterson9862ca02017-01-25 12:50:47 -0500960 WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
Steven Whitehoused69a3c62014-02-21 15:22:35 +0000961
962 old->tr_num_buf_new += new->tr_num_buf_new;
963 old->tr_num_databuf_new += new->tr_num_databuf_new;
964 old->tr_num_buf_rm += new->tr_num_buf_rm;
965 old->tr_num_databuf_rm += new->tr_num_databuf_rm;
966 old->tr_num_revoke += new->tr_num_revoke;
Bob Petersona31b4ec2020-01-20 15:49:28 +0100967 old->tr_num_revoke_rm += new->tr_num_revoke_rm;
Steven Whitehoused69a3c62014-02-21 15:22:35 +0000968
969 list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
970 list_splice_tail_init(&new->tr_buf, &old->tr_buf);
971}
972
/**
 * log_refund - adjust the log reservation for a committed transaction
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * Merges @tr into the cached incore transaction (or makes it the cached
 * one if it added buffers), then recomputes the number of journal blocks
 * actually needed and returns any over-reservation to sd_log_blks_free.
 */
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	/* Return the blocks reserved but not actually needed. */
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
1002
1003/**
1004 * gfs2_log_commit - Commit a transaction to the log
1005 * @sdp: the filesystem
1006 * @tr: the transaction
1007 *
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -05001008 * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
1009 * or the total number of used blocks (pinned blocks plus AIL blocks)
1010 * is greater than thresh2.
1011 *
1012 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
1013 * journal size.
1014 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001015 * Returns: errno
1016 */
1017
1018void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1019{
1020 log_refund(sdp, tr);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001021
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -05001022 if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
1023 ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
1024 atomic_read(&sdp->sd_log_thresh2)))
1025 wake_up(&sdp->sd_logd_waitq);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001026}
1027
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Writes an UNMOUNT log header at the flush head and resets the incore
 * head and tail to it.  Expects the log to have been fully flushed first
 * (no reservations, no pending revokes, empty ail1 list) — asserted below.
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	/* After the header the journal should be fully caught up. */
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}
1050
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -05001051static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
1052{
Bob Petersonf07b3522017-01-05 16:01:45 -05001053 return (atomic_read(&sdp->sd_log_pinned) +
1054 atomic_read(&sdp->sd_log_blks_needed) >=
1055 atomic_read(&sdp->sd_log_thresh1));
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -05001056}
1057
1058static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
1059{
1060 unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
Abhi Dasb066a4eeb2017-08-04 12:15:32 -05001061
1062 if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
1063 return 1;
1064
Bob Petersonf07b3522017-01-05 16:01:45 -05001065 return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
1066 atomic_read(&sdp->sd_log_thresh2);
Benjamin Marzinski5e687ea2010-05-04 14:29:16 -05001067}
Steven Whitehouseec69b182007-11-09 10:01:41 +00001068
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Kernel thread: withdraws the filesystem on journal write errors,
 * flushes the log when the journal or ail thresholds are reached, and
 * otherwise sleeps on sd_logd_waitq until woken or the logd timer
 * expires.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
		}

		did_flush = false;
		/* t == 0: the logd timer expired; do a periodic flush. */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		/* Wake anyone waiting in gfs2_log_reserve() for space. */
		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		/* Sleep until work arrives, the timeout elapses, or we are
		 * asked to stop. */
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
1135