// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

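/*
 * Note: rather than giving each glock its own wait queue, waiters are hashed
 * by lock name into the shared glock_wait_table above.  glock_waitqueue()
 * picks the bucket for a name, and wake_up_glock() below wakes only the
 * waiters whose name matches, via glock_wake_function().
 */
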
/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	BUG_ON(atomic_read(&gl->gl_revokes));
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

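/*
 * Illustrative pattern (not a new helper in this file): callers that want the
 * queued work to own an extra glock reference typically bump the count
 * themselves while holding the lockref spinlock, e.g.
 *
 *	spin_lock(&gl->gl_lockref.lock);
 *	gl->gl_lockref.count++;
 *	__gfs2_glock_queue_work(gl, 0);
 *	spin_unlock(&gl->gl_lockref.lock);
 *
 * as run_queue() and gfs2_glock_nq() below do.
 */
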
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		/*
		 * If another process is already doing the invalidate, let that
		 * finish first. The glock state machine will get back to this
		 * holder again later.
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		}
		else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

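/*
 * Descriptive note: run_queue() above is the core of the glock state machine.
 * It runs with gl->gl_lockref.lock held, decides whether a pending demote or
 * the waiting holders should drive the next state, and calls do_xmote() when
 * a request has to go out to the lock module.  The reply is delivered through
 * finish_xmote(), either directly (lock_nolock) or from glock_work_func()
 * below once GLF_REPLY_PENDING is set.
 */
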
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}

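/*
 * Typical call pattern (illustrative sketch only; the glops pointer and the
 * CREATE flag are assumed from other gfs2 source files, not defined here):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);	(drops the reference taken by gfs2_glock_get)
 */
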
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking. We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}

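/*
 * Illustrative use of the async interface (sketch only, not code from this
 * file): the holders are queued with GL_ASYNC set and then waited on
 * together; -ESTALE means everything was dropped and the caller may retry:
 *
 *	for (i = 0; i < num; i++)
 *		gfs2_glock_nq(&ghs[i]);
 *	error = gfs2_glock_async_wait(num, ghs);
 */
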
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

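/*
 * Typical holder lifecycle (illustrative sketch only, not code from this
 * file):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error)
 *		goto out_uninit;
 *	...			(the glock is held here)
 *	gfs2_glock_dq(&gh);
 * out_uninit:
 *	gfs2_holder_uninit(&gh);
 */
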
1223/**
1224 * gfs2_glock_poll - poll to see if an async request has been completed
1225 * @gh: the holder
1226 *
1227 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1228 */
1229
1230int gfs2_glock_poll(struct gfs2_holder *gh)
1231{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001232 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001233}
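
/*
 * Illustrative sketch (assumptions only, not taken from this file): with
 * GL_ASYNC set, gfs2_glock_nq() returns without waiting, the holder can
 * be polled, and gfs2_glock_wait() collects the final result:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		(never returns an error for GL_ASYNC)
 *	while (!gfs2_glock_poll(&gh))
 *		do_something_else();	(hypothetical helper)
 *	error = gfs2_glock_wait(&gh);
 */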
1234
1235/**
1236 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1237 * @gh: the glock holder
1238 *
1239 */
1240
1241void gfs2_glock_dq(struct gfs2_holder *gh)
1242{
1243 struct gfs2_glock *gl = gh->gh_gl;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001244 const struct gfs2_glock_operations *glops = gl->gl_ops;
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05001245 unsigned delay = 0;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001246 int fast_path = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001247
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001248 spin_lock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001249 if (gh->gh_flags & GL_NOCACHE)
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001250 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001251
David Teiglandb3b94fa2006-01-16 16:50:04 +00001252 list_del_init(&gh->gh_list);
Bob Peterson7508abc2015-12-18 11:54:55 -06001253 clear_bit(HIF_HOLDER, &gh->gh_iflags);
Steven Whitehouse6802e342008-05-21 17:03:22 +01001254 if (find_first_holder(gl) == NULL) {
Steven Whitehouse3042a2cc2007-11-02 08:39:34 +00001255 if (glops->go_unlock) {
Steven Whitehouse6802e342008-05-21 17:03:22 +01001256 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001257 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001258 glops->go_unlock(gh);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001259 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse6802e342008-05-21 17:03:22 +01001260 clear_bit(GLF_LOCK, &gl->gl_flags);
Steven Whitehouse3042a2cc2007-11-02 08:39:34 +00001261 }
Steven Whitehouse6802e342008-05-21 17:03:22 +01001262 if (list_empty(&gl->gl_holders) &&
1263 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1264 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1265 fast_path = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001266 }
Ross Lagerwall7881ef32019-03-27 17:09:17 +00001267 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
Bob Peterson4abb6ad92012-08-09 12:48:43 -05001268 gfs2_glock_add_to_lru(gl);
1269
Steven Whitehouse63997772009-06-12 08:49:20 +01001270 trace_gfs2_glock_queue(gh, 0);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001271 if (unlikely(!fast_path)) {
1272 gl->gl_lockref.count++;
1273 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1274 !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1275 gl->gl_name.ln_type == LM_TYPE_INODE)
1276 delay = gl->gl_hold_time;
1277 __gfs2_glock_queue_work(gl, delay);
1278 }
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001279 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001280}
1281
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001282void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1283{
1284 struct gfs2_glock *gl = gh->gh_gl;
1285 gfs2_glock_dq(gh);
Bob Peterson81e1d452012-08-09 12:48:45 -05001286 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001287 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001288}
1289
David Teiglandb3b94fa2006-01-16 16:50:04 +00001290/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001291 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1292 * @gh: the holder structure
1293 *
1294 */
1295
1296void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1297{
1298 gfs2_glock_dq(gh);
1299 gfs2_holder_uninit(gh);
1300}
1301
1302/**
1303 * gfs2_glock_nq_num - acquire a glock based on lock number
1304 * @sdp: the filesystem
1305 * @number: the lock number
1306 * @glops: the glock operations for the type of glock
1307 * @state: the state to acquire the glock in
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001308 * @flags: modifier flags for the acquisition
David Teiglandb3b94fa2006-01-16 16:50:04 +00001309 * @gh: the struct gfs2_holder
1310 *
1311 * Returns: errno
1312 */
1313
Steven Whitehousecd915492006-09-04 12:49:07 -04001314int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001315 const struct gfs2_glock_operations *glops,
Bob Petersonb58bf402015-07-24 09:45:43 -05001316 unsigned int state, u16 flags, struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001317{
1318 struct gfs2_glock *gl;
1319 int error;
1320
1321 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1322 if (!error) {
1323 error = gfs2_glock_nq_init(gl, state, flags, gh);
1324 gfs2_glock_put(gl);
1325 }
1326
1327 return error;
1328}
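
/*
 * Illustrative sketch, not code from this file: the mount path acquires
 * filesystem-wide locks this way.  The lock number, glock operations and
 * flags below are assumptions made for the example:
 *
 *	struct gfs2_holder live_gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_num(sdp, GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
 *				  LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT,
 *				  &live_gh);
 *	if (error)
 *		return error;
 */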
1329
1330/**
1331 * glock_compare - Compare two struct gfs2_glock structures for sorting
1332 * @arg_a: the first structure
1333 * @arg_b: the second structure
1334 *
1335 */
1336
1337static int glock_compare(const void *arg_a, const void *arg_b)
1338{
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001339 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1340 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1341 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1342 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001343
1344 if (a->ln_number > b->ln_number)
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001345 return 1;
1346 if (a->ln_number < b->ln_number)
1347 return -1;
Steven Whitehouse1c0f4872007-01-22 12:10:39 -05001348 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001349 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001350}
1351
1352/**
1353 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1354 * @num_gh: the number of structures
1355 * @ghs: an array of struct gfs2_holder structures
1356 *
1357 * Returns: 0 on success (all glocks acquired),
1358 * errno on failure (no glocks acquired)
1359 */
1360
1361static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1362 struct gfs2_holder **p)
1363{
1364 unsigned int x;
1365 int error = 0;
1366
1367 for (x = 0; x < num_gh; x++)
1368 p[x] = &ghs[x];
1369
1370 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1371
1372 for (x = 0; x < num_gh; x++) {
1373 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1374
1375 error = gfs2_glock_nq(p[x]);
1376 if (error) {
1377 while (x--)
1378 gfs2_glock_dq(p[x]);
1379 break;
1380 }
1381 }
1382
1383 return error;
1384}
1385
1386/**
1387 * gfs2_glock_nq_m - acquire multiple glocks
1388 * @num_gh: the number of structures
1389 * @ghs: an array of struct gfs2_holder structures
1390 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001391 *
1392 * Returns: 0 on success (all glocks acquired),
1393 * errno on failure (no glocks acquired)
1394 */
1395
1396int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1397{
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001398 struct gfs2_holder *tmp[4];
1399 struct gfs2_holder **pph = tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001400 int error = 0;
1401
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001402 switch(num_gh) {
1403 case 0:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001404 return 0;
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001405 case 1:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001406 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1407 return gfs2_glock_nq(ghs);
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001408 default:
1409 if (num_gh <= 4)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001410 break;
Kees Cook6da2ec52018-06-12 13:55:00 -07001411 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
1412 GFP_NOFS);
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001413 if (!pph)
1414 return -ENOMEM;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001415 }
1416
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001417 error = nq_m_sync(num_gh, ghs, pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001418
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001419 if (pph != tmp)
1420 kfree(pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001421
1422 return error;
1423}
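
/*
 * Illustrative sketch (assumptions only): acquiring the glocks of two
 * inodes together, e.g. for an operation that spans both, and releasing
 * them with gfs2_glock_dq_m() below.  "ip1" and "ip2" are hypothetical:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		... do the work ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */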
1424
1425/**
1426 * gfs2_glock_dq_m - release multiple glocks
1427 * @num_gh: the number of structures
1428 * @ghs: an array of struct gfs2_holder structures
1429 *
1430 */
1431
1432void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1433{
Bob Petersonfa1bbde2011-03-10 11:41:57 -05001434 while (num_gh--)
1435 gfs2_glock_dq(&ghs[num_gh]);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001436}
1437
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001438void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
Steven Whitehouseda755fd2008-01-30 15:34:04 +00001439{
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05001440 unsigned long delay = 0;
1441 unsigned long holdtime;
1442 unsigned long now = jiffies;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001443
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001444 gfs2_glock_hold(gl);
Bob Peterson7cf8dcd2011-06-15 11:41:48 -04001445 holdtime = gl->gl_tchange + gl->gl_hold_time;
1446 if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1447 gl->gl_name.ln_type == LM_TYPE_INODE) {
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001448 if (time_before(now, holdtime))
1449 delay = holdtime - now;
1450 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
Bob Peterson7cf8dcd2011-06-15 11:41:48 -04001451 delay = gl->gl_hold_time;
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001452 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001453
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001454 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001455 handle_callback(gl, state, delay, true);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001456 __gfs2_glock_queue_work(gl, delay);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001457 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001458}
1459
1460/**
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001461 * gfs2_should_freeze - Figure out if glock should be frozen
1462 * @gl: The glock in question
1463 *
1464 * Glocks are not frozen if (a) the result of the dlm operation is
1465 * an error, (b) the locking operation was an unlock operation, or
1466 * (c) there is a "noexp" flagged request anywhere in the queue
1467 *
1468 * Returns: 1 if freezing should occur, 0 otherwise
1469 */
1470
1471static int gfs2_should_freeze(const struct gfs2_glock *gl)
1472{
1473 const struct gfs2_holder *gh;
1474
1475 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1476 return 0;
1477 if (gl->gl_target == LM_ST_UNLOCKED)
1478 return 0;
1479
1480 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1481 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1482 continue;
1483 if (LM_FLAG_NOEXP & gh->gh_flags)
1484 return 0;
1485 }
1486
1487 return 1;
1488}
1489
1490/**
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001491 * gfs2_glock_complete - Callback used by locking
1492 * @gl: Pointer to the glock
1493 * @ret: The return value from the dlm
David Teiglandb3b94fa2006-01-16 16:50:04 +00001494 *
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001495 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
Steven Whitehouse47a25382010-11-30 15:49:31 +00001496 * to use a bitfield shared with other glock state fields.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001497 */
1498
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001499void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001500{
Bob Peterson15562c42015-03-16 11:52:05 -05001501 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001502
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001503 spin_lock(&gl->gl_lockref.lock);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001504 gl->gl_reply = ret;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001505
David Teiglande0c2a9a2012-01-09 17:18:05 -05001506 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001507 if (gfs2_should_freeze(gl)) {
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001508 set_bit(GLF_FROZEN, &gl->gl_flags);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001509 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001510 return;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001511 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001512 }
Steven Whitehouse47a25382010-11-30 15:49:31 +00001513
Steven Whitehousee66cf162013-10-15 15:18:08 +01001514 gl->gl_lockref.count++;
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001515 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001516 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001517 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001518}
1519
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001520static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1521{
1522 struct gfs2_glock *gla, *glb;
1523
1524 gla = list_entry(a, struct gfs2_glock, gl_lru);
1525 glb = list_entry(b, struct gfs2_glock, gl_lru);
1526
1527 if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1528 return 1;
1529 if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1530 return -1;
1531
1532 return 0;
1533}
1534
1535/**
1536 * gfs2_dispose_glock_lru - Demote a list of glocks
1537 * @list: The list to dispose of
1538 *
1539 * Disposing of glocks may involve disk accesses, so here we sort
1540 * the glocks by number (i.e. disk location of the inodes) so that
1541 * any such accesses are sent in order (mostly).
1542 *
1543 * Must be called under the lru_lock, but may drop and retake this
1544 * lock. While the lru_lock is dropped, entries may vanish from the
1545 * list, but no new entries will appear on the list (since it is
1546 * private)
1547 */
1548
1549static void gfs2_dispose_glock_lru(struct list_head *list)
1550__releases(&lru_lock)
1551__acquires(&lru_lock)
1552{
1553 struct gfs2_glock *gl;
1554
1555 list_sort(NULL, list, glock_cmp);
1556
1557 while(!list_empty(list)) {
1558 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1559 list_del_init(&gl->gl_lru);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001560 if (!spin_trylock(&gl->gl_lockref.lock)) {
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001561add_back_to_lru:
Steven Whitehousee66cf162013-10-15 15:18:08 +01001562 list_add(&gl->gl_lru, &lru_list);
Ross Lagerwall7881ef32019-03-27 17:09:17 +00001563 set_bit(GLF_LRU, &gl->gl_flags);
Steven Whitehousee66cf162013-10-15 15:18:08 +01001564 atomic_inc(&lru_count);
1565 continue;
1566 }
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001567 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001568 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001569 goto add_back_to_lru;
1570 }
Steven Whitehousee66cf162013-10-15 15:18:08 +01001571 gl->gl_lockref.count++;
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001572 if (demote_ok(gl))
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001573 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001574 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001575 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001576 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001577 cond_resched_lock(&lru_lock);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001578 }
1579}
1580
Steven Whitehouse2a005852012-12-14 12:28:30 +00001581/**
1582 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1583 * @nr: The number of entries to scan
1584 *
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001585 * This function selects the entries on the LRU which are able to
1586 * be demoted, and then kicks off the process by calling
1587 * gfs2_dispose_glock_lru() above.
Steven Whitehouse2a005852012-12-14 12:28:30 +00001588 */
David Teiglandb3b94fa2006-01-16 16:50:04 +00001589
Dave Chinner1ab6c492013-08-28 10:18:09 +10001590static long gfs2_scan_glock_lru(int nr)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001591{
1592 struct gfs2_glock *gl;
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001593 LIST_HEAD(skipped);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001594 LIST_HEAD(dispose);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001595 long freed = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001596
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001597 spin_lock(&lru_lock);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001598 while ((nr-- >= 0) && !list_empty(&lru_list)) {
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001599 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001600
1601 /* Test for being demotable */
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001602 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001603 list_move(&gl->gl_lru, &dispose);
1604 atomic_dec(&lru_count);
Ross Lagerwall7881ef32019-03-27 17:09:17 +00001605 clear_bit(GLF_LRU, &gl->gl_flags);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001606 freed++;
Steven Whitehouse2163b1e2009-06-25 16:30:26 +01001607 continue;
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001608 }
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001609
1610 list_move(&gl->gl_lru, &skipped);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001611 }
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001612 list_splice(&skipped, &lru_list);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001613 if (!list_empty(&dispose))
1614 gfs2_dispose_glock_lru(&dispose);
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001615 spin_unlock(&lru_lock);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001616
1617 return freed;
Steven Whitehouse2a005852012-12-14 12:28:30 +00001618}
1619
Dave Chinner1ab6c492013-08-28 10:18:09 +10001620static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1621 struct shrink_control *sc)
Steven Whitehouse2a005852012-12-14 12:28:30 +00001622{
Dave Chinner1ab6c492013-08-28 10:18:09 +10001623 if (!(sc->gfp_mask & __GFP_FS))
1624 return SHRINK_STOP;
1625 return gfs2_scan_glock_lru(sc->nr_to_scan);
1626}
Steven Whitehouse2a005852012-12-14 12:28:30 +00001627
Dave Chinner1ab6c492013-08-28 10:18:09 +10001628static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1629 struct shrink_control *sc)
1630{
Glauber Costa55f841c2013-08-28 10:17:53 +10001631 return vfs_pressure_ratio(atomic_read(&lru_count));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001632}
1633
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001634static struct shrinker glock_shrinker = {
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001635 .seeks = DEFAULT_SEEKS,
Dave Chinner1ab6c492013-08-28 10:18:09 +10001636 .count_objects = gfs2_glock_shrink_count,
1637 .scan_objects = gfs2_glock_shrink_scan,
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001638};
1639
David Teiglandb3b94fa2006-01-16 16:50:04 +00001640/**
1641 * glock_hash_walk - Call a function for each glock in the hash table
1642 * @examiner: the function
1643 * @sdp: the filesystem
1645 *
Herbert Xu98687f42017-02-11 19:26:45 +08001646 * Note that the function can be called multiple times on the same
1647 * object. So the user must ensure that the function can cope with
1648 * that.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001649 */
1650
Bob Peterson88ffbf32015-03-16 11:02:46 -05001651static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001652{
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001653 struct gfs2_glock *gl;
Herbert Xu98687f42017-02-11 19:26:45 +08001654 struct rhashtable_iter iter;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001655
Herbert Xu98687f42017-02-11 19:26:45 +08001656 rhashtable_walk_enter(&gl_hash_table, &iter);
1657
1658 do {
Tom Herbert97a6ec42017-12-04 10:31:41 -08001659 rhashtable_walk_start(&iter);
Herbert Xu98687f42017-02-11 19:26:45 +08001660
1661 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
Bob Peterson27c3b412017-08-18 09:15:13 -05001662 if (gl->gl_name.ln_sbd == sdp &&
Bob Peterson88ffbf32015-03-16 11:02:46 -05001663 lockref_get_not_dead(&gl->gl_lockref))
1664 examiner(gl);
Herbert Xu98687f42017-02-11 19:26:45 +08001665
1666 rhashtable_walk_stop(&iter);
1667 } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
1668
1669 rhashtable_walk_exit(&iter);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001670}
1671
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001672/**
1673 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1674 * @gl: The glock to thaw
1675 *
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001676 */
1677
1678static void thaw_glock(struct gfs2_glock *gl)
1679{
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001680 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001681 gfs2_glock_put(gl);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001682 return;
Steven Whitehouse7286b312013-08-20 09:35:09 +01001683 }
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001684 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1685 gfs2_glock_queue_work(gl, 0);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001686}
1687
David Teiglandb3b94fa2006-01-16 16:50:04 +00001688/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001689 * clear_glock - demote a glock to UNLOCKED so that it can be freed from the glock cache
1690 * @gl: the glock to look at
1691 *
1692 */
1693
1694static void clear_glock(struct gfs2_glock *gl)
1695{
Steven Whitehousef42ab082011-04-14 16:50:31 +01001696 gfs2_glock_remove_from_lru(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001697
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001698 spin_lock(&gl->gl_lockref.lock);
Steven Whitehousec741c452010-09-29 14:20:52 +01001699 if (gl->gl_state != LM_ST_UNLOCKED)
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001700 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001701 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001702 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001703}
1704
1705/**
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001706 * gfs2_glock_thaw - Thaw any frozen glocks
1707 * @sdp: The super block
1708 *
1709 */
1710
1711void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1712{
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001713 glock_hash_walk(thaw_glock, sdp);
1714}
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001715
Bob Peterson3792ce92019-05-09 09:21:48 -05001716static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001717{
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001718 spin_lock(&gl->gl_lockref.lock);
Bob Peterson3792ce92019-05-09 09:21:48 -05001719 gfs2_dump_glock(seq, gl, fsid);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001720 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001721}
1722
1723static void dump_glock_func(struct gfs2_glock *gl)
1724{
Bob Peterson3792ce92019-05-09 09:21:48 -05001725 dump_glock(NULL, gl, true);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001726}
1727
1728/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001729 * gfs2_gl_hash_clear - Empty out the glock hash table
1730 * @sdp: the filesystem
1732 *
Steven Whitehouse1bdad602008-06-03 14:09:53 +01001733 * Called when unmounting the filesystem.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001734 */
1735
Steven Whitehousefefc03b2008-12-19 15:32:06 +00001736void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001737{
David Teiglandfb6791d2012-11-13 10:58:56 -05001738 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
Bob Peterson222cb532013-04-25 12:49:17 -04001739 flush_workqueue(glock_workqueue);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001740 glock_hash_walk(clear_glock, sdp);
Steven Whitehouse8f052282010-01-29 15:21:27 +00001741 flush_workqueue(glock_workqueue);
Bob Peterson2aba1b52015-05-19 09:11:23 -05001742 wait_event_timeout(sdp->sd_glock_wait,
1743 atomic_read(&sdp->sd_glock_disposal) == 0,
1744 HZ * 600);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001745 glock_hash_walk(dump_glock_func, sdp);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001746}
1747
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001748void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1749{
1750 struct gfs2_glock *gl = ip->i_gl;
1751 int ret;
1752
1753 ret = gfs2_truncatei_resume(ip);
Bob Peterson15562c42015-03-16 11:52:05 -05001754 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001755
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001756 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001757 clear_bit(GLF_LOCK, &gl->gl_flags);
1758 run_queue(gl, 1);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001759 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001760}
1761
Steven Whitehouse6802e342008-05-21 17:03:22 +01001762static const char *state2str(unsigned state)
Robert Peterson04b933f2007-03-23 17:05:15 -05001763{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001764 switch(state) {
1765 case LM_ST_UNLOCKED:
1766 return "UN";
1767 case LM_ST_SHARED:
1768 return "SH";
1769 case LM_ST_DEFERRED:
1770 return "DF";
1771 case LM_ST_EXCLUSIVE:
1772 return "EX";
1773 }
1774 return "??";
1775}
Robert Peterson04b933f2007-03-23 17:05:15 -05001776
Bob Petersonb58bf402015-07-24 09:45:43 -05001777static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
Steven Whitehouse6802e342008-05-21 17:03:22 +01001778{
1779 char *p = buf;
1780 if (flags & LM_FLAG_TRY)
1781 *p++ = 't';
1782 if (flags & LM_FLAG_TRY_1CB)
1783 *p++ = 'T';
1784 if (flags & LM_FLAG_NOEXP)
1785 *p++ = 'e';
1786 if (flags & LM_FLAG_ANY)
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001787 *p++ = 'A';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001788 if (flags & LM_FLAG_PRIORITY)
1789 *p++ = 'p';
1790 if (flags & GL_ASYNC)
1791 *p++ = 'a';
1792 if (flags & GL_EXACT)
1793 *p++ = 'E';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001794 if (flags & GL_NOCACHE)
1795 *p++ = 'c';
1796 if (test_bit(HIF_HOLDER, &iflags))
1797 *p++ = 'H';
1798 if (test_bit(HIF_WAIT, &iflags))
1799 *p++ = 'W';
1800 if (test_bit(HIF_FIRST, &iflags))
1801 *p++ = 'F';
1802 *p = 0;
1803 return buf;
Robert Peterson04b933f2007-03-23 17:05:15 -05001804}
1805
David Teiglandb3b94fa2006-01-16 16:50:04 +00001806/**
1807 * dump_holder - print information about a glock holder
Steven Whitehouse6802e342008-05-21 17:03:22 +01001808 * @seq: the seq_file struct
David Teiglandb3b94fa2006-01-16 16:50:04 +00001809 * @gh: the glock holder
Bob Peterson3792ce92019-05-09 09:21:48 -05001810 * @fs_id_buf: pointer to file system id (if requested)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001811 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001812 */
1813
Bob Peterson3792ce92019-05-09 09:21:48 -05001814static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
1815 const char *fs_id_buf)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001816{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001817 struct task_struct *gh_owner = NULL;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001818 char flags_buf[32];
David Teiglandb3b94fa2006-01-16 16:50:04 +00001819
Tetsuo Handa0b3a2c92014-01-02 19:52:20 +09001820 rcu_read_lock();
Steven Whitehouse6802e342008-05-21 17:03:22 +01001821 if (gh->gh_owner_pid)
Pavel Emelyanovb1e058d2008-02-07 00:13:19 -08001822 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
Bob Peterson3792ce92019-05-09 09:21:48 -05001823 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1824 fs_id_buf, state2str(gh->gh_state),
Joe Perchescc181522010-11-05 16:12:36 -07001825 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1826 gh->gh_error,
1827 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1828 gh_owner ? gh_owner->comm : "(ended)",
1829 (void *)gh->gh_ip);
Tetsuo Handa0b3a2c92014-01-02 19:52:20 +09001830 rcu_read_unlock();
David Teiglandb3b94fa2006-01-16 16:50:04 +00001831}
1832
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001833static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001834{
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001835 const unsigned long *gflags = &gl->gl_flags;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001836 char *p = buf;
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001837
Steven Whitehouse6802e342008-05-21 17:03:22 +01001838 if (test_bit(GLF_LOCK, gflags))
1839 *p++ = 'l';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001840 if (test_bit(GLF_DEMOTE, gflags))
1841 *p++ = 'D';
1842 if (test_bit(GLF_PENDING_DEMOTE, gflags))
1843 *p++ = 'd';
1844 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1845 *p++ = 'p';
1846 if (test_bit(GLF_DIRTY, gflags))
1847 *p++ = 'y';
1848 if (test_bit(GLF_LFLUSH, gflags))
1849 *p++ = 'f';
1850 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1851 *p++ = 'i';
1852 if (test_bit(GLF_REPLY_PENDING, gflags))
1853 *p++ = 'r';
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001854 if (test_bit(GLF_INITIAL, gflags))
Steven Whitehoused8348de2009-02-05 10:12:38 +00001855 *p++ = 'I';
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001856 if (test_bit(GLF_FROZEN, gflags))
1857 *p++ = 'F';
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001858 if (test_bit(GLF_QUEUED, gflags))
1859 *p++ = 'q';
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001860 if (test_bit(GLF_LRU, gflags))
1861 *p++ = 'L';
1862 if (gl->gl_object)
1863 *p++ = 'o';
Steven Whitehousea2457692012-01-20 10:38:36 +00001864 if (test_bit(GLF_BLOCKING, gflags))
1865 *p++ = 'b';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001866 *p = 0;
1867 return buf;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001868}
1869
1870/**
Steven Whitehouse8eae1ca02012-10-15 10:57:02 +01001871 * gfs2_dump_glock - print information about a glock
Steven Whitehouse6802e342008-05-21 17:03:22 +01001872 * @seq: The seq_file struct
David Teiglandb3b94fa2006-01-16 16:50:04 +00001873 * @gl: the glock
Bob Peterson3792ce92019-05-09 09:21:48 -05001874 * @fsid: If true, also dump the file system id
Steven Whitehouse6802e342008-05-21 17:03:22 +01001875 *
1876 * The file format is as follows:
1877 * One line per object, capital letters are used to indicate objects
1878 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1879 * other objects are indented by a single space and follow the glock to
1880 * which they are related. Fields are indicated by lower case letters
1881 * followed by a colon and the field value, except for strings which are in
1882 * [] so that it's possible to see if they are composed of spaces, for
1883 * example. The fields are n = number (id of the object), f = flags,
1884 * t = type, s = state, r = refcount, e = error, p = pid.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001885 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001886 */
1887
Bob Peterson3792ce92019-05-09 09:21:48 -05001888void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001889{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001890 const struct gfs2_glock_operations *glops = gl->gl_ops;
1891 unsigned long long dtime;
1892 const struct gfs2_holder *gh;
1893 char gflags_buf[32];
Bob Peterson3792ce92019-05-09 09:21:48 -05001894 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Bob Peterson98fb0572019-08-13 09:25:15 -04001895 char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
David Teiglandb3b94fa2006-01-16 16:50:04 +00001896
Bob Peterson3792ce92019-05-09 09:21:48 -05001897 memset(fs_id_buf, 0, sizeof(fs_id_buf));
1898 if (fsid && sdp) /* safety precaution */
1899 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
Steven Whitehouse6802e342008-05-21 17:03:22 +01001900 dtime = jiffies - gl->gl_demote_time;
1901 dtime *= 1000000/HZ; /* demote time in uSec */
1902 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1903 dtime = 0;
Bob Peterson3792ce92019-05-09 09:21:48 -05001904 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
1905 "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
Steven Whitehouse6802e342008-05-21 17:03:22 +01001906 gl->gl_name.ln_type,
1907 (unsigned long long)gl->gl_name.ln_number,
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001908 gflags2str(gflags_buf, gl),
Steven Whitehouse6802e342008-05-21 17:03:22 +01001909 state2str(gl->gl_target),
1910 state2str(gl->gl_demote_state), dtime,
Steven Whitehouse6802e342008-05-21 17:03:22 +01001911 atomic_read(&gl->gl_ail_count),
Bob Peterson638803d2019-06-06 07:33:38 -05001912 atomic_read(&gl->gl_revokes),
Steven Whitehousee66cf162013-10-15 15:18:08 +01001913 (int)gl->gl_lockref.count, gl->gl_hold_time);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001914
Steven Whitehouseac3beb62014-01-16 10:31:13 +00001915 list_for_each_entry(gh, &gl->gl_holders, gh_list)
Bob Peterson3792ce92019-05-09 09:21:48 -05001916 dump_holder(seq, gh, fs_id_buf);
Steven Whitehouseac3beb62014-01-16 10:31:13 +00001917
Steven Whitehouse6802e342008-05-21 17:03:22 +01001918 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
Bob Peterson3792ce92019-05-09 09:21:48 -05001919 glops->go_dump(seq, gl, fs_id_buf);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001920}
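
/*
 * A made-up example of the resulting output (all values are illustrative
 * only): one G: line per glock, one H: line per holder, and whatever the
 * type-specific go_dump() callback appends (e.g. an I: line for inodes):
 *
 *	G: s:EX n:2/805f f:lyq t:EX d:UN/0 a:0 v:0 r:3 m:200
 *	 H: s:EX f:H e:0 p:1397 [postmark] gfs2_inode_lookup+0x11c/0x2b0
 */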
1921
Steven Whitehousea2457692012-01-20 10:38:36 +00001922static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1923{
1924 struct gfs2_glock *gl = iter_ptr;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001925
Ben Hutchings4d207132015-08-27 12:51:45 -05001926 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
Steven Whitehousea2457692012-01-20 10:38:36 +00001927 gl->gl_name.ln_type,
1928 (unsigned long long)gl->gl_name.ln_number,
Ben Hutchings4d207132015-08-27 12:51:45 -05001929 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1930 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1931 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1932 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1933 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1934 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1935 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1936 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
Steven Whitehousea2457692012-01-20 10:38:36 +00001937 return 0;
1938}
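
/*
 * A made-up sample line from the "glstats" debugfs file (numbers are
 * illustrative only); n: is the lock type/number and the remaining fields
 * are the smoothed round-trip and inter-request statistics plus the dlm
 * and queue counts printed above:
 *
 *	G: n:2/805f rtt:1250/400 rttb:2100/620 irt:180000/52000 dcnt: 213 qcnt: 230
 */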
David Teiglandb3b94fa2006-01-16 16:50:04 +00001939
Steven Whitehousea2457692012-01-20 10:38:36 +00001940static const char *gfs2_gltype[] = {
1941 "type",
1942 "reserved",
1943 "nondisk",
1944 "inode",
1945 "rgrp",
1946 "meta",
1947 "iopen",
1948 "flock",
1949 "plock",
1950 "quota",
1951 "journal",
1952};
1953
1954static const char *gfs2_stype[] = {
1955 [GFS2_LKS_SRTT] = "srtt",
1956 [GFS2_LKS_SRTTVAR] = "srttvar",
1957 [GFS2_LKS_SRTTB] = "srttb",
1958 [GFS2_LKS_SRTTVARB] = "srttvarb",
1959 [GFS2_LKS_SIRT] = "sirt",
1960 [GFS2_LKS_SIRTVAR] = "sirtvar",
1961 [GFS2_LKS_DCOUNT] = "dlm",
1962 [GFS2_LKS_QCOUNT] = "queue",
1963};
1964
1965#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1966
1967static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1968{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05001969 struct gfs2_sbd *sdp = seq->private;
1970 loff_t pos = *(loff_t *)iter_ptr;
1971 unsigned index = pos >> 3;
1972 unsigned subindex = pos & 0x07;
Steven Whitehousea2457692012-01-20 10:38:36 +00001973 int i;
1974
1975 if (index == 0 && subindex != 0)
1976 return 0;
1977
1978 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1979 (index == 0) ? "cpu": gfs2_stype[subindex]);
1980
1981 for_each_possible_cpu(i) {
1982 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
Andreas Gruenbacher8f7e0a82015-08-27 13:02:54 -05001983
1984 if (index == 0)
1985 seq_printf(seq, " %15u", i);
1986 else
1987 seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1988 lkstats[index - 1].stats[subindex]);
Steven Whitehousea2457692012-01-20 10:38:36 +00001989 }
1990 seq_putc(seq, '\n');
1991 return 0;
1992}
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01001993
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001994int __init gfs2_glock_init(void)
1995{
Andreas Gruenbacher05154802017-08-01 11:18:26 -05001996 int i, ret;
Bob Peterson88ffbf32015-03-16 11:02:46 -05001997
1998 ret = rhashtable_init(&gl_hash_table, &ht_parms);
1999 if (ret < 0)
2000 return ret;
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01002001
Steven Whitehoused2115772010-11-03 19:58:53 +00002002 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
Tejun Heo58a69cb2011-02-16 09:25:31 +01002003 WQ_HIGHPRI | WQ_FREEZABLE, 0);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002004 if (!glock_workqueue) {
2005 rhashtable_destroy(&gl_hash_table);
Dan Carpenterdfc46162013-08-15 10:54:43 +03002006 return -ENOMEM;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002007 }
Steven Whitehoused2115772010-11-03 19:58:53 +00002008 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
Tejun Heo58a69cb2011-02-16 09:25:31 +01002009 WQ_MEM_RECLAIM | WQ_FREEZABLE,
Steven Whitehoused2115772010-11-03 19:58:53 +00002010 0);
Dan Carpenterdfc46162013-08-15 10:54:43 +03002011 if (!gfs2_delete_workqueue) {
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002012 destroy_workqueue(glock_workqueue);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002013 rhashtable_destroy(&gl_hash_table);
Dan Carpenterdfc46162013-08-15 10:54:43 +03002014 return -ENOMEM;
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002015 }
Steven Whitehouse97cc10252008-11-20 13:39:47 +00002016
Chao Yue0d735c2016-09-21 12:09:40 -05002017 ret = register_shrinker(&glock_shrinker);
2018 if (ret) {
2019 destroy_workqueue(gfs2_delete_workqueue);
2020 destroy_workqueue(glock_workqueue);
2021 rhashtable_destroy(&gl_hash_table);
2022 return ret;
2023 }
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05002024
Andreas Gruenbacher05154802017-08-01 11:18:26 -05002025 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
2026 init_waitqueue_head(glock_wait_table + i);
2027
Steven Whitehouse85d1da62006-09-07 14:40:21 -04002028 return 0;
2029}
2030
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01002031void gfs2_glock_exit(void)
2032{
Steven Whitehouse97cc10252008-11-20 13:39:47 +00002033 unregister_shrinker(&glock_shrinker);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002034 rhashtable_destroy(&gl_hash_table);
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05002035 destroy_workqueue(glock_workqueue);
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002036 destroy_workqueue(gfs2_delete_workqueue);
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01002037}
2038
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002039static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002040{
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002041 struct gfs2_glock *gl = gi->gl;
2042
2043 if (gl) {
2044 if (n == 0)
2045 return;
2046 if (!lockref_put_not_zero(&gl->gl_lockref))
2047 gfs2_glock_queue_put(gl);
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002048 }
2049 for (;;) {
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002050 gl = rhashtable_walk_next(&gi->hti);
2051 if (IS_ERR_OR_NULL(gl)) {
2052 if (gl == ERR_PTR(-EAGAIN)) {
2053 n = 1;
2054 continue;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002055 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002056 gl = NULL;
2057 break;
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002058 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002059 if (gl->gl_name.ln_sbd != gi->sdp)
2060 continue;
2061 if (n <= 1) {
2062 if (!lockref_get_not_dead(&gl->gl_lockref))
2063 continue;
2064 break;
2065 } else {
2066 if (__lockref_is_dead(&gl->gl_lockref))
2067 continue;
2068 n--;
2069 }
Dan Carpenter14d37562016-12-14 08:02:03 -06002070 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002071 gi->gl = gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002072}
2073
Steven Whitehouse6802e342008-05-21 17:03:22 +01002074static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
Bob Peterson27c3b412017-08-18 09:15:13 -05002075 __acquires(RCU)
Robert Peterson7c52b162007-03-16 10:26:37 +00002076{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002077 struct gfs2_glock_iter *gi = seq->private;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002078 loff_t n;
Robert Peterson7c52b162007-03-16 10:26:37 +00002079
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002080 /*
2081 * We can either stay where we are, skip to the next hash table
2082 * entry, or start from the beginning.
2083 */
2084 if (*pos < gi->last_pos) {
2085 rhashtable_walk_exit(&gi->hti);
2086 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2087 n = *pos + 1;
2088 } else {
2089 n = *pos - gi->last_pos;
2090 }
Robert Peterson7c52b162007-03-16 10:26:37 +00002091
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002092 rhashtable_walk_start(&gi->hti);
Robert Peterson7c52b162007-03-16 10:26:37 +00002093
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002094 gfs2_glock_iter_next(gi, n);
Steven Whitehouseba1ddcb2012-06-08 11:16:22 +01002095 gi->last_pos = *pos;
Steven Whitehouse6802e342008-05-21 17:03:22 +01002096 return gi->gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002097}
2098
Steven Whitehouse6802e342008-05-21 17:03:22 +01002099static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
Robert Peterson7c52b162007-03-16 10:26:37 +00002100 loff_t *pos)
2101{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002102 struct gfs2_glock_iter *gi = seq->private;
Robert Peterson7c52b162007-03-16 10:26:37 +00002103
2104 (*pos)++;
Steven Whitehouseba1ddcb2012-06-08 11:16:22 +01002105 gi->last_pos = *pos;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002106 gfs2_glock_iter_next(gi, 1);
Steven Whitehouse6802e342008-05-21 17:03:22 +01002107 return gi->gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002108}
2109
Steven Whitehouse6802e342008-05-21 17:03:22 +01002110static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
Bob Peterson27c3b412017-08-18 09:15:13 -05002111 __releases(RCU)
Robert Peterson7c52b162007-03-16 10:26:37 +00002112{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002113 struct gfs2_glock_iter *gi = seq->private;
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002114
Bob Peterson88ffbf32015-03-16 11:02:46 -05002115 rhashtable_walk_stop(&gi->hti);
Robert Peterson7c52b162007-03-16 10:26:37 +00002116}
2117
Steven Whitehouse6802e342008-05-21 17:03:22 +01002118static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
Robert Peterson7c52b162007-03-16 10:26:37 +00002119{
Bob Peterson3792ce92019-05-09 09:21:48 -05002120 dump_glock(seq, iter_ptr, false);
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002121 return 0;
Robert Peterson7c52b162007-03-16 10:26:37 +00002122}
2123
Steven Whitehousea2457692012-01-20 10:38:36 +00002124static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2125{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002126 preempt_disable();
Steven Whitehousea2457692012-01-20 10:38:36 +00002127 if (*pos >= GFS2_NR_SBSTATS)
2128 return NULL;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002129 return pos;
Steven Whitehousea2457692012-01-20 10:38:36 +00002130}
2131
2132static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2133 loff_t *pos)
2134{
Steven Whitehousea2457692012-01-20 10:38:36 +00002135 (*pos)++;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002136 if (*pos >= GFS2_NR_SBSTATS)
Steven Whitehousea2457692012-01-20 10:38:36 +00002137 return NULL;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002138 return pos;
Steven Whitehousea2457692012-01-20 10:38:36 +00002139}
2140
2141static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2142{
2143 preempt_enable();
2144}
2145
Denis Cheng4ef29002007-07-31 18:31:11 +08002146static const struct seq_operations gfs2_glock_seq_ops = {
Robert Peterson7c52b162007-03-16 10:26:37 +00002147 .start = gfs2_glock_seq_start,
2148 .next = gfs2_glock_seq_next,
2149 .stop = gfs2_glock_seq_stop,
2150 .show = gfs2_glock_seq_show,
2151};
2152
Steven Whitehousea2457692012-01-20 10:38:36 +00002153static const struct seq_operations gfs2_glstats_seq_ops = {
2154 .start = gfs2_glock_seq_start,
2155 .next = gfs2_glock_seq_next,
2156 .stop = gfs2_glock_seq_stop,
2157 .show = gfs2_glstats_seq_show,
2158};
2159
2160static const struct seq_operations gfs2_sbstats_seq_ops = {
2161 .start = gfs2_sbstats_seq_start,
2162 .next = gfs2_sbstats_seq_next,
2163 .stop = gfs2_sbstats_seq_stop,
2164 .show = gfs2_sbstats_seq_show,
2165};
2166
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002167#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2168
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002169static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2170 const struct seq_operations *ops)
Robert Peterson7c52b162007-03-16 10:26:37 +00002171{
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002172 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
Steven Whitehouse6802e342008-05-21 17:03:22 +01002173 if (ret == 0) {
2174 struct seq_file *seq = file->private_data;
2175 struct gfs2_glock_iter *gi = seq->private;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002176
Steven Whitehouse6802e342008-05-21 17:03:22 +01002177 gi->sdp = inode->i_private;
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002178 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
Steven Whitehousedf5d2f52012-06-07 13:30:16 +01002179 if (seq->buf)
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002180 seq->size = GFS2_SEQ_GOODSIZE;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002181 /*
2182 * Initially, we are "before" the first hash table entry; the
2183 * first call to rhashtable_walk_next gets us the first entry.
2184 */
2185 gi->last_pos = -1;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002186 gi->gl = NULL;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002187 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
Steven Whitehouse6802e342008-05-21 17:03:22 +01002188 }
2189 return ret;
Robert Peterson7c52b162007-03-16 10:26:37 +00002190}
2191
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002192static int gfs2_glocks_open(struct inode *inode, struct file *file)
2193{
2194 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2195}
2196
Bob Peterson88ffbf32015-03-16 11:02:46 -05002197static int gfs2_glocks_release(struct inode *inode, struct file *file)
2198{
2199 struct seq_file *seq = file->private_data;
2200 struct gfs2_glock_iter *gi = seq->private;
2201
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002202 if (gi->gl)
2203 gfs2_glock_put(gi->gl);
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002204 rhashtable_walk_exit(&gi->hti);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002205 return seq_release_private(inode, file);
2206}
2207
Steven Whitehousea2457692012-01-20 10:38:36 +00002208static int gfs2_glstats_open(struct inode *inode, struct file *file)
2209{
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002210 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002211}
2212
2213static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2214{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002215 int ret = seq_open(file, &gfs2_sbstats_seq_ops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002216 if (ret == 0) {
2217 struct seq_file *seq = file->private_data;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002218 seq->private = inode->i_private; /* sdp */
Steven Whitehousea2457692012-01-20 10:38:36 +00002219 }
2220 return ret;
2221}
2222
2223static const struct file_operations gfs2_glocks_fops = {
Robert Peterson7c52b162007-03-16 10:26:37 +00002224 .owner = THIS_MODULE,
Steven Whitehousea2457692012-01-20 10:38:36 +00002225 .open = gfs2_glocks_open,
2226 .read = seq_read,
2227 .llseek = seq_lseek,
Bob Peterson88ffbf32015-03-16 11:02:46 -05002228 .release = gfs2_glocks_release,
Steven Whitehousea2457692012-01-20 10:38:36 +00002229};
2230
2231static const struct file_operations gfs2_glstats_fops = {
2232 .owner = THIS_MODULE,
2233 .open = gfs2_glstats_open,
2234 .read = seq_read,
2235 .llseek = seq_lseek,
Bob Peterson88ffbf32015-03-16 11:02:46 -05002236 .release = gfs2_glocks_release,
Steven Whitehousea2457692012-01-20 10:38:36 +00002237};
2238
2239static const struct file_operations gfs2_sbstats_fops = {
2240 .owner = THIS_MODULE,
2241 .open = gfs2_sbstats_open,
Robert Peterson7c52b162007-03-16 10:26:37 +00002242 .read = seq_read,
2243 .llseek = seq_lseek,
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002244 .release = seq_release,
Robert Peterson7c52b162007-03-16 10:26:37 +00002245};
2246
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002247void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
Robert Peterson7c52b162007-03-16 10:26:37 +00002248{
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002249 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
Steven Whitehousea2457692012-01-20 10:38:36 +00002250
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002251 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2252 &gfs2_glocks_fops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002253
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002254 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2255 &gfs2_glstats_fops);
Chengyu Song7b4ddfa2015-03-24 09:37:53 -05002256
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002257 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2258 &gfs2_sbstats_fops);
Robert Peterson7c52b162007-03-16 10:26:37 +00002259}
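
/*
 * With debugfs mounted in its usual place, these per-filesystem files can
 * then be read from user space; the paths below are an assumption based
 * on the directory names used above:
 *
 *	cat /sys/kernel/debug/gfs2/<locktable>/glocks
 *	cat /sys/kernel/debug/gfs2/<locktable>/glstats
 *	cat /sys/kernel/debug/gfs2/<locktable>/sbstats
 */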
2260
2261void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2262{
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002263 debugfs_remove_recursive(sdp->debugfs_dir);
2264 sdp->debugfs_dir = NULL;
Robert Peterson7c52b162007-03-16 10:26:37 +00002265}
2266
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002267void gfs2_register_debugfs(void)
Robert Peterson7c52b162007-03-16 10:26:37 +00002268{
2269 gfs2_root = debugfs_create_dir("gfs2", NULL);
Robert Peterson7c52b162007-03-16 10:26:37 +00002270}
2271
2272void gfs2_unregister_debugfs(void)
2273{
2274 debugfs_remove(gfs2_root);
Robert Peterson5f882092007-04-18 11:41:11 -05002275 gfs2_root = NULL;
Robert Peterson7c52b162007-03-16 10:26:37 +00002276}