/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
        Opt_jid,
        Opt_id,
        Opt_first,
        Opt_nodir,
        Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED        0x00
#define LM_TYPE_NONDISK         0x01
#define LM_TYPE_INODE           0x02
#define LM_TYPE_RGRP            0x03
#define LM_TYPE_META            0x04
#define LM_TYPE_IOPEN           0x05
#define LM_TYPE_FLOCK           0x06
#define LM_TYPE_PLOCK           0x07
#define LM_TYPE_QUOTA           0x08
#define LM_TYPE_JOURNAL         0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED          0
#define LM_ST_EXCLUSIVE         1
#define LM_ST_DEFERRED          2
#define LM_ST_SHARED            3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by recovery the way ordinary
 * locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED. The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations. Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state. A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock. A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY             0x0001
#define LM_FLAG_TRY_1CB         0x0002
#define LM_FLAG_NOEXP           0x0004
#define LM_FLAG_ANY             0x0008
#define LM_FLAG_PRIORITY        0x0010
#define LM_FLAG_NODE_SCOPE      0x0020
#define GL_ASYNC                0x0040
#define GL_EXACT                0x0080
#define GL_SKIP                 0x0100
#define GL_NOCACHE              0x0400

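/*
 * Illustrative sketch (not part of the API): a caller probing for a
 * shared lock without blocking might combine the flags above with
 * gfs2_glock_nq_init(), declared later in this header.  GLR_TRYFAILED
 * means the non-blocking attempt lost out; any other non-zero return
 * is an errno.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *      if (error == GLR_TRYFAILED)
 *              return 0;
 *      if (error)
 *              return error;
 *      (...read state protected by the glock...)
 *      gfs2_glock_dq_uninit(&gh);
 */
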
/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 * LM_OUT_ERROR
 * An error occurred while processing the request.
 */

#define LM_OUT_ST_MASK          0x00000003
#define LM_OUT_CANCELED         0x00000008
#define LM_OUT_ERROR            0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP            308
#define LM_RD_SUCCESS           309

#define GLR_TRYFAILED           13

/* Glock hold-time bounds and adjustment steps, in jiffies */
#define GL_GLOCK_MAX_HOLD       (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD       (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD       (long)(10)
#define GL_GLOCK_HOLD_INCR      (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR      (long)(HZ / 40)

struct lm_lockops {
        const char *lm_proto_name;
        int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
        void (*lm_first_done) (struct gfs2_sbd *sdp);
        void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
                                    unsigned int result);
        void (*lm_unmount) (struct gfs2_sbd *sdp);
        void (*lm_withdraw) (struct gfs2_sbd *sdp);
        void (*lm_put_lock) (struct gfs2_glock *gl);
        int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
                        unsigned int flags);
        void (*lm_cancel) (struct gfs2_glock *gl);
        const match_table_t *lm_tokens;
};
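
/*
 * Illustrative sketch of a lock module's ops table, loosely modeled on
 * the built-in lock_nolock backend (the DLM backend's table is declared
 * as gfs2_dlm_ops near the bottom of this header).  A module fills in
 * only the operations it implements:
 *
 *      static const struct lm_lockops nolock_ops = {
 *              .lm_proto_name = "lock_nolock",
 *              .lm_put_lock = gfs2_glock_free,
 *              .lm_tokens = &nolock_tokens,
 *      };
 */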

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        struct pid *pid;

        /* Look in glock's list of holders for one with current task as owner */
        spin_lock(&gl->gl_lockref.lock);
        pid = task_pid(current);
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        break;
                if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
                        continue;
                if (gh->gh_owner_pid == pid)
                        goto out;
        }
        gh = NULL;
out:
        spin_unlock(&gl->gl_lockref.lock);

        return gh;
}
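
/*
 * Illustrative sketch: since the helper above returns the current
 * task's holder (or NULL), recursive-locking checks reduce to:
 *
 *      if (gfs2_glock_is_locked_by_me(gl))
 *              (...the current task already holds this glock...)
 */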

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
        /*
         * Glocks with GLOF_ASPACE have their address space allocated
         * immediately after the glock structure itself.
         */
        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                return (struct address_space *)(gl + 1);
        return NULL;
}

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);

extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                               u16 flags, struct gfs2_holder *gh,
                               unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                                    u16 flags, struct gfs2_holder *gh) {
        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}
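
/*
 * Illustrative sketch of the asynchronous pattern (error handling and
 * holder cleanup omitted for brevity): initializing the holder
 * separately from enqueueing it lets a caller queue a GL_ASYNC request,
 * overlap other work, and collect the result with gfs2_glock_wait() or
 * check for it with gfs2_glock_poll(), both declared below.
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      (...overlap other work while the lock is acquired...)
 *      if (!error)
 *              error = gfs2_glock_wait(&gh);
 */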

extern void gfs2_holder_reinit(unsigned int state, u16 flags,
                               struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                             const struct gfs2_glock_operations *glops,
                             unsigned int state, u16 flags,
                             struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
                            bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {              \
                        gfs2_dump_glock(NULL, gl, true);        \
                        BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {        \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {    \
                        gfs2_dump_glock(NULL, gl, true);                \
                        gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
                                     unsigned int state, u16 flags,
                                     struct gfs2_holder *gh)
{
        int error;

        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}
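
/*
 * Illustrative sketch of the canonical blocking pattern built on the
 * helper above: acquire, operate, then drop and release the holder in
 * one step with gfs2_glock_dq_uninit().
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *      if (error)
 *              return error;
 *      (...access the inode under the shared glock...)
 *      gfs2_glock_dq_uninit(&gh);
 */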

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
        gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
        return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
        return !list_empty(&gh->gh_list);
}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
                gfs2_dump_glock(NULL, gl, true);
        gl->gl_object = object;
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *      else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *              gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode. If we clear the
 * new inode's gl_object, we'll introduce metadata corruption. It's also more
 * than just gfs2_delete_inode: it calls clear_inode, which calls
 * gfs2_clear_inode, which also tries to clear gl_object.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_object == object)
                gl->gl_object = NULL;
        spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
        spin_unlock(&gl->gl_lockref.lock);
}
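
/*
 * Illustrative sketch of how the two helpers above pair up (an
 * assumption based on their definitions, not a prescribed protocol):
 * a long-running operation can allow its holder to be demoted while it
 * blocks, then re-check whether the holder is still queued before
 * relying on the lock again.
 *
 *      gfs2_holder_allow_demote(gh);
 *      (...potentially long-blocking work...)
 *      gfs2_holder_disallow_demote(gh);
 *      if (!gfs2_holder_queued(gh))
 *              (...the glock was demoted; re-acquire before continuing...)
 */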

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */