blob: 650ad77c4d0b41c72392cfa4bb88660bdadee9e6 [file] [log] [blame]
Thomas Gleixner7336d0e2019-05-31 01:09:56 -07001// SPDX-License-Identifier: GPL-2.0-only
David Teiglandb3b94fa2006-01-16 16:50:04 +00002/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersoncf45b752008-01-31 10:31:39 -06004 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00005 */
6
David Teiglandb3b94fa2006-01-16 16:50:04 +00007#include <linux/spinlock.h>
8#include <linux/completion.h>
9#include <linux/buffer_head.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050010#include <linux/gfs2_ondisk.h>
Steven Whitehouse6802e342008-05-21 17:03:22 +010011#include <linux/bio.h>
Steven Whitehousec65f7fb2009-10-02 11:54:39 +010012#include <linux/posix_acl.h>
Andreas Gruenbacherf39814f62015-12-24 11:09:40 -050013#include <linux/security.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000014
15#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050016#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000017#include "bmap.h"
18#include "glock.h"
19#include "glops.h"
20#include "inode.h"
21#include "log.h"
22#include "meta_io.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000023#include "recovery.h"
24#include "rgrp.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050025#include "util.h"
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040026#include "trans.h"
Steven Whitehouse17d539f2011-06-15 10:29:37 +010027#include "dir.h"
Abhi Dasf4686c22019-05-02 14:17:40 -050028#include "lops.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000029
Benjamin Marzinski2e60d762014-11-13 20:42:04 -060030struct workqueue_struct *gfs2_freeze_wq;
31
Bob Peterson601ef0d2020-01-28 20:23:45 +010032extern struct workqueue_struct *gfs2_control_wq;
33
/**
 * gfs2_ail_error - report an unexpected dirty/locked/pinned AIL buffer
 * @gl: the glock whose AIL list contained the bad buffer
 * @bh: the offending buffer head
 *
 * Logs full diagnostics for the buffer and its glock, then requests a
 * delayed withdraw of the filesystem (we cannot safely continue once
 * the AIL is inconsistent, but withdrawing immediately here would be
 * unsafe in this context).
 */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	/* Defer the actual withdraw; see gfs2_withdraw_delayed() */
	gfs2_withdraw_delayed(sdp);
}
49
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* Any of these bits set means the buffer is still in use */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	/* Lock order: log lock first, then the AIL spinlock */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* Walk in reverse so older entries are revoked first */
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			/* From fsync, busy buffers are expected; just skip.
			 * Otherwise this is a filesystem inconsistency. */
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* Outside fsync, the AIL list must be fully drained by now */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040086
Steven Whitehousedba898b2011-04-14 09:54:02 +010087
/**
 * gfs2_ail_empty_gl - empty a glock's AIL list via revokes and a log flush
 * @gl: the glock
 *
 * Revokes every buffer on the glock's AIL list in an on-stack transaction
 * and then flushes the log so the revokes are committed to the journal.
 *
 * Returns: 0 (the errors on this path are handled by withdraw/flush)
 */
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/* On-stack transaction sized to hold exactly 'revokes' revokes */
	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, 0, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}
137
Steven Whitehouseb5b24d72011-09-07 10:33:25 +0100138void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
Steven Whitehousedba898b2011-04-14 09:54:02 +0100139{
Bob Peterson15562c42015-03-16 11:52:05 -0500140 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousedba898b2011-04-14 09:54:02 +0100141 unsigned int revokes = atomic_read(&gl->gl_ail_count);
142 int ret;
143
144 if (!revokes)
145 return;
146
Andreas Gruenbacher2129b422020-12-17 16:14:30 +0100147 ret = gfs2_trans_begin(sdp, 0, revokes);
Steven Whitehousedba898b2011-04-14 09:54:02 +0100148 if (ret)
149 return;
Andreas Gruenbacher2129b422020-12-17 16:14:30 +0100150 __gfs2_ail_flush(gl, fsync, revokes);
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400151 gfs2_trans_end(sdp);
Bob Peterson805c09072018-01-08 10:34:17 -0500152 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
153 GFS2_LFC_AIL_FLUSH);
Steven Whitehouseddacfaf2006-10-03 11:10:41 -0400154}
Steven Whitehouseba7f7292006-07-26 11:27:10 -0400155
156/**
Bob Peterson4a557522020-10-27 12:29:37 -0500157 * gfs2_rgrp_metasync - sync out the metadata of a resource group
158 * @gl: the glock protecting the resource group
159 *
160 */
161
162static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
163{
164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
165 struct address_space *metamapping = &sdp->sd_aspace;
166 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
167 const unsigned bsize = sdp->sd_sb.sb_bsize;
168 loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
169 loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
170 int error;
171
172 filemap_fdatawrite_range(metamapping, start, end);
173 error = filemap_fdatawait_range(metamapping, start, end);
174 WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
175 mapping_set_error(metamapping, error);
176 if (error)
177 gfs2_io_error(sdp);
178 return error;
179}
180
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	/* Nothing to do unless the glock was dirtied */
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/* Order: commit the log first, then write back the rgrp metadata,
	 * then drain the AIL list for this glock */
	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}
208
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags; DIO_METADATA is expected to be set
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	/* Page-aligned byte range of this rgrp's blocks in sd_aspace */
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	/* The cached rgrp data is now stale; force a re-read on next use */
	set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
}
233
Andrew Price0e539ca2020-10-07 12:30:58 +0100234static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
235 const char *fs_id_buf)
236{
Alexander Aring16e62812020-11-22 18:10:24 -0500237 struct gfs2_rgrpd *rgd = gl->gl_object;
Andrew Price0e539ca2020-10-07 12:30:58 +0100238
239 if (rgd)
240 gfs2_rgrp_dump(seq, rgd, fs_id_buf);
241}
242
/*
 * gfs2_glock2inode - fetch the inode attached to a glock and mark it busy.
 *
 * Reads gl_object under gl_lockref.lock and, while still holding the lock,
 * sets GIF_GLOP_PENDING on the inode so other code can wait for the glock
 * operation to finish (cleared later by gfs2_clear_glop_pending()).
 *
 * Returns the inode, or NULL if no inode is attached to the glock.
 */
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}
254
/*
 * gfs2_glock2rgrp - fetch the resource group attached to a glock.
 *
 * gl_object is read under gl_lockref.lock for a consistent snapshot.
 * Returns the rgrp, or NULL if none is attached.
 */
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}
265
/*
 * gfs2_clear_glop_pending - signal that a glock operation has finished.
 *
 * Pairs with the set_bit() in gfs2_glock2inode(). clear_bit_unlock()
 * provides the release ordering, and wake_up_bit() wakes any waiters
 * sleeping on GIF_GLOP_PENDING. Safe to call with a NULL inode.
 */
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
274
David Teiglandb3b94fa2006-01-16 16:50:04 +0000275/**
Bob Peterson4a557522020-10-27 12:29:37 -0500276 * gfs2_inode_metasync - sync out the metadata of an inode
277 * @gl: the glock protecting the inode
278 *
279 */
280int gfs2_inode_metasync(struct gfs2_glock *gl)
281{
282 struct address_space *metamapping = gfs2_glock2aspace(gl);
283 int error;
284
285 filemap_fdatawrite(metamapping);
286 error = filemap_fdatawait(metamapping);
287 if (error)
288 gfs2_io_error(gl->gl_name.ln_sbd);
289 return error;
290}
291
/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success or the first writeback errno encountered
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		/* Drop shared writable mappings so future faults re-take
		 * the glock, and wait for in-flight direct I/O */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/* Commit the log before writing back metadata and data pages */
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	/* Wait for the metadata writeback started above */
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	/* Wake anyone waiting on GIF_GLOP_PENDING (set by gfs2_glock2inode) */
	gfs2_clear_glop_pending(ip);
	return error;
}
339
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags; DIO_METADATA selects metadata invalidation
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Cached dinode contents are now stale */
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* Invalidating the rindex inode also invalidates the rgrp cache */
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	/* Wake anyone waiting on GIF_GLOP_PENDING (set by gfs2_glock2inode) */
	gfs2_clear_glop_pending(ip);
}
377
378/**
379 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
380 * @gl: the glock
381 *
382 * Returns: 1 if it's ok
383 */
384
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000385static int inode_go_demote_ok(const struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000386{
Bob Peterson15562c42015-03-16 11:52:05 -0500387 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousebc015cb2011-01-19 09:30:01 +0000388
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000389 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
390 return 0;
Steven Whitehousebc015cb2011-01-19 09:30:01 +0000391
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000392 return 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000393}
394
/*
 * gfs2_dinode_in - unpack an on-disk dinode into the incore inode.
 * @ip: the incore GFS2 inode to fill in
 * @buf: the on-disk dinode buffer (big-endian struct gfs2_dinode)
 *
 * Validates the block number, inode type and metadata tree limits
 * while converting each field from disk endianness.
 *
 * Returns: 0 on success, -EIO (after gfs2_consist_inode) on corruption.
 */
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_state & I_NEW;

	/* The dinode must describe this block and (for an existing inode)
	 * must not have changed file type */
	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	/* Only move atime forward; the incore value may be newer */
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
460
461/**
462 * gfs2_inode_refresh - Refresh the incore copy of the dinode
463 * @ip: The GFS2 inode
464 *
465 * Returns: errno
466 */
467
468int gfs2_inode_refresh(struct gfs2_inode *ip)
469{
470 struct buffer_head *dibh;
471 int error;
472
473 error = gfs2_meta_inode_buffer(ip, &dibh);
474 if (error)
475 return error;
476
Steven Whitehoused4b2cf12011-05-09 13:49:59 +0100477 error = gfs2_dinode_in(ip, dibh->b_data);
478 brelse(dibh);
Steven Whitehoused4b2cf12011-05-09 13:49:59 +0100479 return error;
480}
481
/**
 * inode_go_instantiate - read in an inode if necessary
 * @gh: The glock holder
 *
 * Returns: errno (or 1 when a pending truncate was queued)
 */

static int inode_go_instantiate(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		goto out;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto out;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	/* An interrupted truncate found on disk is finished later by the
	 * quota daemon; queue the inode and wake it up */
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		error = 1;
	}

out:
	return error;
}
520
521/**
Steven Whitehouse6802e342008-05-21 17:03:22 +0100522 * inode_go_dump - print information about an inode
523 * @seq: The iterator
Lee Jonesc551f662021-03-30 17:44:29 +0100524 * @gl: The glock
Bob Peterson3792ce92019-05-09 09:21:48 -0500525 * @fs_id_buf: file system id (may be empty)
Steven Whitehouse6802e342008-05-21 17:03:22 +0100526 *
Steven Whitehouse6802e342008-05-21 17:03:22 +0100527 */
528
Bob Peterson3792ce92019-05-09 09:21:48 -0500529static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
530 const char *fs_id_buf)
Steven Whitehouse6802e342008-05-21 17:03:22 +0100531{
Bob Peterson27a2660f2018-04-18 12:05:01 -0700532 struct gfs2_inode *ip = gl->gl_object;
533 struct inode *inode = &ip->i_inode;
534 unsigned long nrpages;
535
Steven Whitehouse6802e342008-05-21 17:03:22 +0100536 if (ip == NULL)
Steven Whitehouseac3beb62014-01-16 10:31:13 +0000537 return;
Bob Peterson27a2660f2018-04-18 12:05:01 -0700538
539 xa_lock_irq(&inode->i_data.i_pages);
540 nrpages = inode->i_data.nrpages;
541 xa_unlock_irq(&inode->i_data.i_pages);
542
Bob Peterson3792ce92019-05-09 09:21:48 -0500543 gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
544 "p:%lu\n", fs_id_buf,
Steven Whitehouse6802e342008-05-21 17:03:22 +0100545 (unsigned long long)ip->i_no_formal_ino,
546 (unsigned long long)ip->i_no_addr,
Steven Whitehousefa75ced2008-11-10 10:10:12 +0000547 IF2DT(ip->i_inode.i_mode), ip->i_flags,
548 (unsigned int)ip->i_diskflags,
Bob Peterson27a2660f2018-04-18 12:05:01 -0700549 (unsigned long long)i_size_read(inode), nrpages);
Steven Whitehouse6802e342008-05-21 17:03:22 +0100550}
551
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 * Returns: 0 (errors are handled via withdraw)
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node who holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}
595
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 * Re-reads the journal head after a freeze/thaw and re-initializes the
 * log pointers, provided the journal is live.
 *
 * Returns: 0 on success, otherwise an errno
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop cached journal metadata before re-reading the head */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		/* A clean freeze must have left an unmount log header */
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}
622
/**
 * freeze_go_demote_ok - Check to see if it's ok to unlock the freeze glock
 * @gl: the glock
 *
 * Always returns 0, i.e. the freeze glock is never demoted merely for
 * being idle.  NOTE(review): presumably this keeps the glock cached so
 * that a freeze request from another node is noticed — confirm against
 * the demote logic in glock.c.
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
634
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500635/**
636 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
637 * @gl: the glock
Lee Jonesc551f662021-03-30 17:44:29 +0100638 * @remote: true if this came from a different cluster node
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500639 *
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -0500640 * gl_lockref.lock lock is held while calling this
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500641 */
Steven Whitehouse81ffbf62013-04-10 10:26:55 +0100642static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500643{
Andreas Gruenbacher6f6597ba2017-06-30 07:55:08 -0500644 struct gfs2_inode *ip = gl->gl_object;
Bob Peterson15562c42015-03-16 11:52:05 -0500645 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehouse001e8e82011-03-30 14:17:51 +0100646
David Howellsbc98a422017-07-17 08:45:34 +0100647 if (!remote || sb_rdonly(sdp->sd_vfs))
Steven Whitehouse001e8e82011-03-30 14:17:51 +0100648 return;
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500649
650 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
Steven Whitehouse009d8512009-12-08 12:12:13 +0000651 gl->gl_state == LM_ST_SHARED && ip) {
Steven Whitehousee66cf162013-10-15 15:18:08 +0100652 gl->gl_lockref.count++;
Andreas Gruenbachera0e3cc62020-01-16 20:12:26 +0100653 if (!queue_delayed_work(gfs2_delete_workqueue,
654 &gl->gl_delete, 0))
Steven Whitehousee66cf162013-10-15 15:18:08 +0100655 gl->gl_lockref.count--;
Benjamin Marzinskib94a1702009-07-23 18:52:34 -0500656 }
657}
658
/**
 * iopen_go_demote_ok - Check to see if it's ok to unlock an iopen glock
 * @gl: the glock
 *
 * Refuses demotion while delete work is queued for this glock
 * (gfs2_delete_work_queued() != 0) — presumably because the queued
 * work still needs the lock; confirm against gfs2_delete_work_queued.
 */
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}
663
Bob Peterson601ef0d2020-01-28 20:23:45 +0100664/**
665 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
666 * @gl: glock being freed
667 *
668 * For now, this is only used for the journal inode glock. In withdraw
669 * situations, we need to wait for the glock to be freed so that we know
670 * other nodes may proceed with recovery / journal replay.
671 */
672static void inode_go_free(struct gfs2_glock *gl)
673{
674 /* Note that we cannot reference gl_object because it's already set
675 * to NULL by this point in its lifecycle. */
676 if (!test_bit(GLF_FREEING, &gl->gl_flags))
677 return;
678 clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
679 wake_up_bit(&gl->gl_flags, GLF_FREEING);
680}
681
682/**
683 * nondisk_go_callback - used to signal when a node did a withdraw
684 * @gl: the nondisk glock
685 * @remote: true if this came from a different cluster node
686 *
687 */
688static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
689{
690 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
691
692 /* Ignore the callback unless it's from another node, and it's the
693 live lock. */
694 if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
695 return;
696
697 /* First order of business is to cancel the demote request. We don't
698 * really want to demote a nondisk glock. At best it's just to inform
699 * us of another node's withdraw. We'll keep it in SH mode. */
700 clear_bit(GLF_DEMOTE, &gl->gl_flags);
701 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
702
703 /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
704 if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
705 test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
706 test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
707 return;
708
709 /* We only care when a node wants us to unlock, because that means
710 * they want a journal recovered. */
711 if (gl->gl_demote_state != LM_ST_UNLOCKED)
712 return;
713
714 if (sdp->sd_args.ar_spectator) {
715 fs_warn(sdp, "Spectator node cannot recover journals.\n");
716 return;
717 }
718
719 fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
720 set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
721 /*
722 * We can't call remote_withdraw directly here or gfs2_recover_journal
723 * because this is called from the glock unlock function and the
724 * remote_withdraw needs to enqueue and dequeue the same "live" glock
725 * we were called from. So we queue it to the control work queue in
726 * lock_dlm.
727 */
728 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
729}
730
/* Operations for glocks protecting metadata; no on-disk object. */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

/* Operations for inode glocks: sync/invalidate page cache and metadata,
 * instantiate and dump inode state; address-space, LRU and LVB backed. */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

/* Operations for resource-group glocks; rgrp state travels in the LVB. */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

/* Operations for the freeze glock (cluster-wide freeze coordination). */
const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

/* Operations for iopen glocks, which track open inodes across the
 * cluster; go_subclass = 1 gives them a separate lockdep class. */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

/* Operations for flock glocks (cluster-wide file locks). */
const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

/* Operations for nondisk glocks, e.g. the "live" lock used to signal
 * node withdraws (see nondisk_go_callback). */
const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

/* Operations for quota glocks; quota data travels in the LVB. */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

/* Operations for journal glocks. */
const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

/* Dispatch table indexed by lock type; note there is deliberately no
 * entry between LM_TYPE_RGRP and LM_TYPE_IOPEN. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
803