// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

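/*
 * Wake function for entries on the glock wait table: only wake up waiters
 * whose lock name matches the name passed in @key.
 */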
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

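/*
 * RCU callback which frees the glock memory once any concurrent RCU
 * readers have finished with it.
 */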
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

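/*
 * Remove the glock from the hash table, wake up anyone waiting for it to
 * disappear, and hand the actual freeing off to RCU.
 */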
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	BUG_ON(atomic_read(&gl->gl_revokes));
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

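/*
 * Take the glock off the LRU list, if the glock type uses the LRU at all.
 */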
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

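/*
 * Final reference drop: mark the lockref dead, remove the glock from the
 * LRU and hand it back to the lock module.  Called with the lockref
 * spinlock held; drops it.
 */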
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

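/*
 * Clear the holder's HIF_WAIT bit and wake up anyone sleeping on it,
 * including async waiters on sd_async_glock_wait.
 */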
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

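/*
 * A pending demote has been dealt with: reset the demote state and wake
 * up anyone waiting on GLF_DEMOTE.
 */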
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    target != LM_ST_UNLOCKED)
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		}
		else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

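/*
 * Work function, typically queued from the iopen glock callback, which
 * looks up an inode that may have been deleted on another node and prunes
 * its dentries so that it can be evicted.
 */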
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

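/*
 * Main glock work function: handle a pending reply from the lock module,
 * check whether a pending demote is now due, run the holder queue, and
 * drop the references that were passed along with this work item.
 */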
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

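/*
 * Look up a glock by @name, optionally inserting @new if no glock with
 * that name exists yet.  If a matching glock is found but is already
 * dead, wait on the glock wait table until it disappears and retry.
 * Returns a referenced glock, NULL if @new was inserted (or no glock was
 * found), or an ERR_PTR on rhashtable insertion failure.
 */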
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);

out:
	return ret;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

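/*
 * If a lock request took more than a second to be granted, lengthen the
 * glock's minimum hold time so that we hang on to it a little longer.
 */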
static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}

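/*
 * Returns 1 while any holder in the array is still waiting for a response
 * from the lock module (i.e. still has HIF_WAIT set).
 */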
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking.  We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

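/*
 * Print to the given seq_file if one is supplied, otherwise to the kernel
 * log at error level.
 */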
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error = 0;

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		gl->gl_lockref.count++;
		__gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_lockref.lock);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_lockref.lock);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_lockref.lock);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		__gfs2_glock_queue_work(gl, delay);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

Abhijith Dasd93cfa92007-06-11 08:22:32 +01001275void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1276{
1277 struct gfs2_glock *gl = gh->gh_gl;
1278 gfs2_glock_dq(gh);
Bob Peterson81e1d452012-08-09 12:48:45 -05001279 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001280 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001281}
1282
David Teiglandb3b94fa2006-01-16 16:50:04 +00001283/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001284 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1285 * @gh: the holder structure
1286 *
1287 */
1288
1289void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1290{
1291 gfs2_glock_dq(gh);
1292 gfs2_holder_uninit(gh);
1293}
1294
1295/**
1296 * gfs2_glock_nq_num - acquire a glock based on lock number
1297 * @sdp: the filesystem
1298 * @number: the lock number
1299 * @glops: the glock operations for the type of glock
1300 * @state: the state to acquire the glock in
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001301 * @flags: modifier flags for the acquisition
David Teiglandb3b94fa2006-01-16 16:50:04 +00001302 * @gh: the struct gfs2_holder
1303 *
1304 * Returns: errno
1305 */
1306
Steven Whitehousecd915492006-09-04 12:49:07 -04001307int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001308 const struct gfs2_glock_operations *glops,
Bob Petersonb58bf402015-07-24 09:45:43 -05001309 unsigned int state, u16 flags, struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001310{
1311 struct gfs2_glock *gl;
1312 int error;
1313
1314 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1315 if (!error) {
1316 error = gfs2_glock_nq_init(gl, state, flags, gh);
1317 gfs2_glock_put(gl);
1318 }
1319
1320 return error;
1321}
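
/*
 * Illustrative sketch (not part of the original file): acquiring a glock
 * purely by lock number.  The block number and the choice of the inode
 * glock operations (&gfs2_inode_glops) are assumptions made for the example.
 */
#if 0	/* example only, never compiled */
static int example_nq_by_number(struct gfs2_sbd *sdp, u64 no_addr)
{
	struct gfs2_holder gh;
	int error;

	/* Looks up (or creates) the glock, queues the holder and drops the
	   extra glock reference taken by gfs2_glock_get() again. */
	error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
				  LM_ST_SHARED, 0, &gh);
	if (error)
		return error;
	/* ... use the locked object ... */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
#endif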
1322
1323/**
1324 * glock_compare - Compare two struct gfs2_glock structures for sorting
1325 * @arg_a: the first structure
1326 * @arg_b: the second structure
1327 *
1328 */
1329
1330static int glock_compare(const void *arg_a, const void *arg_b)
1331{
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001332 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1333 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1334 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1335 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001336
1337 if (a->ln_number > b->ln_number)
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001338 return 1;
1339 if (a->ln_number < b->ln_number)
1340 return -1;
Steven Whitehouse1c0f4872007-01-22 12:10:39 -05001341 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
Steven Whitehousea5e08a9e2006-09-09 17:07:05 -04001342 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001343}
1344
1345/**
 1346 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1347 * @num_gh: the number of structures
1348 * @ghs: an array of struct gfs2_holder structures
1349 *
1350 * Returns: 0 on success (all glocks acquired),
1351 * errno on failure (no glocks acquired)
1352 */
1353
1354static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1355 struct gfs2_holder **p)
1356{
1357 unsigned int x;
1358 int error = 0;
1359
1360 for (x = 0; x < num_gh; x++)
1361 p[x] = &ghs[x];
1362
1363 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1364
1365 for (x = 0; x < num_gh; x++) {
1366 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1367
1368 error = gfs2_glock_nq(p[x]);
1369 if (error) {
1370 while (x--)
1371 gfs2_glock_dq(p[x]);
1372 break;
1373 }
1374 }
1375
1376 return error;
1377}
1378
1379/**
1380 * gfs2_glock_nq_m - acquire multiple glocks
1381 * @num_gh: the number of structures
1382 * @ghs: an array of struct gfs2_holder structures
1383 *
1385 * Returns: 0 on success (all glocks acquired),
1386 * errno on failure (no glocks acquired)
1387 */
1388
1389int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1390{
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001391 struct gfs2_holder *tmp[4];
1392 struct gfs2_holder **pph = tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001393 int error = 0;
1394
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001395 switch(num_gh) {
1396 case 0:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001397 return 0;
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001398 case 1:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001399 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1400 return gfs2_glock_nq(ghs);
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001401 default:
1402 if (num_gh <= 4)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001403 break;
Kees Cook6da2ec52018-06-12 13:55:00 -07001404 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
1405 GFP_NOFS);
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001406 if (!pph)
1407 return -ENOMEM;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001408 }
1409
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001410 error = nq_m_sync(num_gh, ghs, pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001411
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001412 if (pph != tmp)
1413 kfree(pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001414
1415 return error;
1416}
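
/*
 * Illustrative sketch (not part of the original file): taking two glocks at
 * once.  gfs2_glock_nq_m() sorts the requests into a deadlock-free order
 * internally (see nq_m_sync() above), so the caller need not order ghs[].
 */
#if 0	/* example only, never compiled */
static int example_nq_two(struct gfs2_glock *gl_a, struct gfs2_glock *gl_b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

	error = gfs2_glock_nq_m(2, ghs);	/* all or nothing */
	if (error == 0) {
		/* ... both glocks are held here ... */
		gfs2_glock_dq_m(2, ghs);
	}
	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}
#endif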
1417
1418/**
1419 * gfs2_glock_dq_m - release multiple glocks
1420 * @num_gh: the number of structures
1421 * @ghs: an array of struct gfs2_holder structures
1422 *
1423 */
1424
1425void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1426{
Bob Petersonfa1bbde2011-03-10 11:41:57 -05001427 while (num_gh--)
1428 gfs2_glock_dq(&ghs[num_gh]);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001429}
1430
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001431void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
Steven Whitehouseda755fd2008-01-30 15:34:04 +00001432{
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05001433 unsigned long delay = 0;
1434 unsigned long holdtime;
1435 unsigned long now = jiffies;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001436
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001437 gfs2_glock_hold(gl);
Bob Peterson7cf8dcd2011-06-15 11:41:48 -04001438 holdtime = gl->gl_tchange + gl->gl_hold_time;
1439 if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1440 gl->gl_name.ln_type == LM_TYPE_INODE) {
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001441 if (time_before(now, holdtime))
1442 delay = holdtime - now;
1443 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
Bob Peterson7cf8dcd2011-06-15 11:41:48 -04001444 delay = gl->gl_hold_time;
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001445 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001446
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001447 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001448 handle_callback(gl, state, delay, true);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001449 __gfs2_glock_queue_work(gl, delay);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001450 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001451}
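
/*
 * Note added for clarity (not in the original file): gfs2_glock_cb() is the
 * entry point used by the lock module when another node asks for a
 * conflicting lock; with the default DLM backend it is called from the
 * blocking-AST callback in lock_dlm.c.
 */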
1452
1453/**
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001454 * gfs2_should_freeze - Figure out if glock should be frozen
1455 * @gl: The glock in question
1456 *
1457 * Glocks are not frozen if (a) the result of the dlm operation is
1458 * an error, (b) the locking operation was an unlock operation or
1459 * (c) if there is a "noexp" flagged request anywhere in the queue
1460 *
1461 * Returns: 1 if freezing should occur, 0 otherwise
1462 */
1463
1464static int gfs2_should_freeze(const struct gfs2_glock *gl)
1465{
1466 const struct gfs2_holder *gh;
1467
1468 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1469 return 0;
1470 if (gl->gl_target == LM_ST_UNLOCKED)
1471 return 0;
1472
1473 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1474 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1475 continue;
1476 if (LM_FLAG_NOEXP & gh->gh_flags)
1477 return 0;
1478 }
1479
1480 return 1;
1481}
1482
1483/**
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001484 * gfs2_glock_complete - Callback used by locking
1485 * @gl: Pointer to the glock
1486 * @ret: The return value from the dlm
David Teiglandb3b94fa2006-01-16 16:50:04 +00001487 *
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001488 * The gl_reply field is protected by gl_lockref.lock so that it is ok
Steven Whitehouse47a25382010-11-30 15:49:31 +00001489 * to use a bitfield shared with other glock state fields.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001490 */
1491
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001492void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001493{
Bob Peterson15562c42015-03-16 11:52:05 -05001494 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001495
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001496 spin_lock(&gl->gl_lockref.lock);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001497 gl->gl_reply = ret;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001498
David Teiglande0c2a9a2012-01-09 17:18:05 -05001499 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001500 if (gfs2_should_freeze(gl)) {
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001501 set_bit(GLF_FROZEN, &gl->gl_flags);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001502 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001503 return;
Steven Whitehouse0809f6e2010-08-02 10:15:17 +01001504 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001505 }
Steven Whitehouse47a25382010-11-30 15:49:31 +00001506
Steven Whitehousee66cf162013-10-15 15:18:08 +01001507 gl->gl_lockref.count++;
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001508 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001509 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001510 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001511}
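
/*
 * Note added for clarity (not in the original file): gfs2_glock_complete()
 * is the other half of the lock-module interface; with the DLM backend it
 * is called from the completion-AST callback in lock_dlm.c once the dlm
 * has finished processing the request issued by do_xmote().
 */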
1512
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001513static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1514{
1515 struct gfs2_glock *gla, *glb;
1516
1517 gla = list_entry(a, struct gfs2_glock, gl_lru);
1518 glb = list_entry(b, struct gfs2_glock, gl_lru);
1519
1520 if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1521 return 1;
1522 if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1523 return -1;
1524
1525 return 0;
1526}
1527
1528/**
1529 * gfs2_dispose_glock_lru - Demote a list of glocks
1530 * @list: The list to dispose of
1531 *
 1532 * Disposing of glocks may involve disk accesses, so here we sort
1533 * the glocks by number (i.e. disk location of the inodes) so that if
1534 * there are any such accesses, they'll be sent in order (mostly).
1535 *
1536 * Must be called under the lru_lock, but may drop and retake this
1537 * lock. While the lru_lock is dropped, entries may vanish from the
1538 * list, but no new entries will appear on the list (since it is
1539 * private)
1540 */
1541
1542static void gfs2_dispose_glock_lru(struct list_head *list)
1543__releases(&lru_lock)
1544__acquires(&lru_lock)
1545{
1546 struct gfs2_glock *gl;
1547
1548 list_sort(NULL, list, glock_cmp);
1549
1550 while(!list_empty(list)) {
1551 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1552 list_del_init(&gl->gl_lru);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001553 if (!spin_trylock(&gl->gl_lockref.lock)) {
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001554add_back_to_lru:
Steven Whitehousee66cf162013-10-15 15:18:08 +01001555 list_add(&gl->gl_lru, &lru_list);
Ross Lagerwall7881ef32019-03-27 17:09:17 +00001556 set_bit(GLF_LRU, &gl->gl_flags);
Steven Whitehousee66cf162013-10-15 15:18:08 +01001557 atomic_inc(&lru_count);
1558 continue;
1559 }
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001560 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001561 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001562 goto add_back_to_lru;
1563 }
Steven Whitehousee66cf162013-10-15 15:18:08 +01001564 gl->gl_lockref.count++;
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001565 if (demote_ok(gl))
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001566 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001567 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001568 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001569 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001570 cond_resched_lock(&lru_lock);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001571 }
1572}
1573
Steven Whitehouse2a005852012-12-14 12:28:30 +00001574/**
1575 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1576 * @nr: The number of entries to scan
1577 *
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001578 * This function selects the entries on the LRU which are able to
1579 * be demoted, and then kicks off the process by calling
1580 * gfs2_dispose_glock_lru() above.
Steven Whitehouse2a005852012-12-14 12:28:30 +00001581 */
David Teiglandb3b94fa2006-01-16 16:50:04 +00001582
Dave Chinner1ab6c492013-08-28 10:18:09 +10001583static long gfs2_scan_glock_lru(int nr)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001584{
1585 struct gfs2_glock *gl;
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001586 LIST_HEAD(skipped);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001587 LIST_HEAD(dispose);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001588 long freed = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001589
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001590 spin_lock(&lru_lock);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001591 while ((nr-- >= 0) && !list_empty(&lru_list)) {
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001592 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001593
1594 /* Test for being demotable */
Steven Whitehouse94a09a32014-06-23 14:43:32 +01001595 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001596 list_move(&gl->gl_lru, &dispose);
1597 atomic_dec(&lru_count);
Ross Lagerwall7881ef32019-03-27 17:09:17 +00001598 clear_bit(GLF_LRU, &gl->gl_flags);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001599 freed++;
Steven Whitehouse2163b1e2009-06-25 16:30:26 +01001600 continue;
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001601 }
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001602
1603 list_move(&gl->gl_lru, &skipped);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001604 }
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001605 list_splice(&skipped, &lru_list);
Steven Whitehouse4506a5192013-02-01 20:36:03 +00001606 if (!list_empty(&dispose))
1607 gfs2_dispose_glock_lru(&dispose);
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001608 spin_unlock(&lru_lock);
Dave Chinner1ab6c492013-08-28 10:18:09 +10001609
1610 return freed;
Steven Whitehouse2a005852012-12-14 12:28:30 +00001611}
1612
Dave Chinner1ab6c492013-08-28 10:18:09 +10001613static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1614 struct shrink_control *sc)
Steven Whitehouse2a005852012-12-14 12:28:30 +00001615{
Dave Chinner1ab6c492013-08-28 10:18:09 +10001616 if (!(sc->gfp_mask & __GFP_FS))
1617 return SHRINK_STOP;
1618 return gfs2_scan_glock_lru(sc->nr_to_scan);
1619}
Steven Whitehouse2a005852012-12-14 12:28:30 +00001620
Dave Chinner1ab6c492013-08-28 10:18:09 +10001621static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1622 struct shrink_control *sc)
1623{
Glauber Costa55f841c2013-08-28 10:17:53 +10001624 return vfs_pressure_ratio(atomic_read(&lru_count));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001625}
1626
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001627static struct shrinker glock_shrinker = {
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001628 .seeks = DEFAULT_SEEKS,
Dave Chinner1ab6c492013-08-28 10:18:09 +10001629 .count_objects = gfs2_glock_shrink_count,
1630 .scan_objects = gfs2_glock_shrink_scan,
Steven Whitehouse97cc10252008-11-20 13:39:47 +00001631};
1632
David Teiglandb3b94fa2006-01-16 16:50:04 +00001633/**
1634 * examine_bucket - Call a function for glock in a hash bucket
1635 * @examiner: the function
1636 * @sdp: the filesystem
1637 * @bucket: the bucket
1638 *
Herbert Xu98687f42017-02-11 19:26:45 +08001639 * Note that the function can be called multiple times on the same
1640 * object. So the user must ensure that the function can cope with
1641 * that.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001642 */
1643
Bob Peterson88ffbf32015-03-16 11:02:46 -05001644static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001645{
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001646 struct gfs2_glock *gl;
Herbert Xu98687f42017-02-11 19:26:45 +08001647 struct rhashtable_iter iter;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001648
Herbert Xu98687f42017-02-11 19:26:45 +08001649 rhashtable_walk_enter(&gl_hash_table, &iter);
1650
1651 do {
Tom Herbert97a6ec42017-12-04 10:31:41 -08001652 rhashtable_walk_start(&iter);
Herbert Xu98687f42017-02-11 19:26:45 +08001653
1654 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
Bob Peterson27c3b412017-08-18 09:15:13 -05001655 if (gl->gl_name.ln_sbd == sdp &&
Bob Peterson88ffbf32015-03-16 11:02:46 -05001656 lockref_get_not_dead(&gl->gl_lockref))
1657 examiner(gl);
Herbert Xu98687f42017-02-11 19:26:45 +08001658
1659 rhashtable_walk_stop(&iter);
1660 } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
1661
1662 rhashtable_walk_exit(&iter);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001663}
1664
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001665/**
1666 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1667 * @gl: The glock to thaw
1668 *
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001669 */
1670
1671static void thaw_glock(struct gfs2_glock *gl)
1672{
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001673 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001674 gfs2_glock_put(gl);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001675 return;
Steven Whitehouse7286b312013-08-20 09:35:09 +01001676 }
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001677 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1678 gfs2_glock_queue_work(gl, 0);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001679}
1680
David Teiglandb3b94fa2006-01-16 16:50:04 +00001681/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001682 * clear_glock - look at a glock and see if we can free it from glock cache
1683 * @gl: the glock to look at
1684 *
1685 */
1686
1687static void clear_glock(struct gfs2_glock *gl)
1688{
Steven Whitehousef42ab082011-04-14 16:50:31 +01001689 gfs2_glock_remove_from_lru(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001690
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001691 spin_lock(&gl->gl_lockref.lock);
Steven Whitehousec741c452010-09-29 14:20:52 +01001692 if (gl->gl_state != LM_ST_UNLOCKED)
Steven Whitehouse81ffbf62013-04-10 10:26:55 +01001693 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
Andreas Gruenbacher6b0c7442017-06-30 08:10:01 -05001694 __gfs2_glock_queue_work(gl, 0);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001695 spin_unlock(&gl->gl_lockref.lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001696}
1697
1698/**
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001699 * gfs2_glock_thaw - Thaw any frozen glocks
1700 * @sdp: The super block
1701 *
1702 */
1703
1704void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1705{
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001706 glock_hash_walk(thaw_glock, sdp);
1707}
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001708
Bob Peterson3792ce92019-05-09 09:21:48 -05001709static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001710{
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001711 spin_lock(&gl->gl_lockref.lock);
Bob Peterson3792ce92019-05-09 09:21:48 -05001712 gfs2_dump_glock(seq, gl, fsid);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001713 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001714}
1715
1716static void dump_glock_func(struct gfs2_glock *gl)
1717{
Bob Peterson3792ce92019-05-09 09:21:48 -05001718 dump_glock(NULL, gl, true);
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001719}
1720
1721/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001722 * gfs2_gl_hash_clear - Empty out the glock hash table
1723 * @sdp: the filesystem
1725 *
Steven Whitehouse1bdad602008-06-03 14:09:53 +01001726 * Called when unmounting the filesystem.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001727 */
1728
Steven Whitehousefefc03b2008-12-19 15:32:06 +00001729void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001730{
David Teiglandfb6791d2012-11-13 10:58:56 -05001731 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
Bob Peterson222cb532013-04-25 12:49:17 -04001732 flush_workqueue(glock_workqueue);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001733 glock_hash_walk(clear_glock, sdp);
Steven Whitehouse8f052282010-01-29 15:21:27 +00001734 flush_workqueue(glock_workqueue);
Bob Peterson2aba1b52015-05-19 09:11:23 -05001735 wait_event_timeout(sdp->sd_glock_wait,
1736 atomic_read(&sdp->sd_glock_disposal) == 0,
1737 HZ * 600);
Steven Whitehousebc015cb2011-01-19 09:30:01 +00001738 glock_hash_walk(dump_glock_func, sdp);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001739}
1740
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001741void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1742{
1743 struct gfs2_glock *gl = ip->i_gl;
1744 int ret;
1745
1746 ret = gfs2_truncatei_resume(ip);
Bob Peterson15562c42015-03-16 11:52:05 -05001747 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001748
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001749 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001750 clear_bit(GLF_LOCK, &gl->gl_flags);
1751 run_queue(gl, 1);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -05001752 spin_unlock(&gl->gl_lockref.lock);
Steven Whitehouse813e0c42008-11-18 13:38:48 +00001753}
1754
Steven Whitehouse6802e342008-05-21 17:03:22 +01001755static const char *state2str(unsigned state)
Robert Peterson04b933f2007-03-23 17:05:15 -05001756{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001757 switch(state) {
1758 case LM_ST_UNLOCKED:
1759 return "UN";
1760 case LM_ST_SHARED:
1761 return "SH";
1762 case LM_ST_DEFERRED:
1763 return "DF";
1764 case LM_ST_EXCLUSIVE:
1765 return "EX";
1766 }
1767 return "??";
1768}
Robert Peterson04b933f2007-03-23 17:05:15 -05001769
Bob Petersonb58bf402015-07-24 09:45:43 -05001770static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
Steven Whitehouse6802e342008-05-21 17:03:22 +01001771{
1772 char *p = buf;
1773 if (flags & LM_FLAG_TRY)
1774 *p++ = 't';
1775 if (flags & LM_FLAG_TRY_1CB)
1776 *p++ = 'T';
1777 if (flags & LM_FLAG_NOEXP)
1778 *p++ = 'e';
1779 if (flags & LM_FLAG_ANY)
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001780 *p++ = 'A';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001781 if (flags & LM_FLAG_PRIORITY)
1782 *p++ = 'p';
1783 if (flags & GL_ASYNC)
1784 *p++ = 'a';
1785 if (flags & GL_EXACT)
1786 *p++ = 'E';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001787 if (flags & GL_NOCACHE)
1788 *p++ = 'c';
1789 if (test_bit(HIF_HOLDER, &iflags))
1790 *p++ = 'H';
1791 if (test_bit(HIF_WAIT, &iflags))
1792 *p++ = 'W';
1793 if (test_bit(HIF_FIRST, &iflags))
1794 *p++ = 'F';
1795 *p = 0;
1796 return buf;
Robert Peterson04b933f2007-03-23 17:05:15 -05001797}
1798
David Teiglandb3b94fa2006-01-16 16:50:04 +00001799/**
1800 * dump_holder - print information about a glock holder
Steven Whitehouse6802e342008-05-21 17:03:22 +01001801 * @seq: the seq_file struct
David Teiglandb3b94fa2006-01-16 16:50:04 +00001802 * @gh: the glock holder
Bob Peterson3792ce92019-05-09 09:21:48 -05001803 * @fs_id_buf: pointer to file system id (if requested)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001804 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001805 */
1806
Bob Peterson3792ce92019-05-09 09:21:48 -05001807static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
1808 const char *fs_id_buf)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001809{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001810 struct task_struct *gh_owner = NULL;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001811 char flags_buf[32];
David Teiglandb3b94fa2006-01-16 16:50:04 +00001812
Tetsuo Handa0b3a2c92014-01-02 19:52:20 +09001813 rcu_read_lock();
Steven Whitehouse6802e342008-05-21 17:03:22 +01001814 if (gh->gh_owner_pid)
Pavel Emelyanovb1e058d2008-02-07 00:13:19 -08001815 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
Bob Peterson3792ce92019-05-09 09:21:48 -05001816 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1817 fs_id_buf, state2str(gh->gh_state),
Joe Perchescc181522010-11-05 16:12:36 -07001818 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1819 gh->gh_error,
1820 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1821 gh_owner ? gh_owner->comm : "(ended)",
1822 (void *)gh->gh_ip);
Tetsuo Handa0b3a2c92014-01-02 19:52:20 +09001823 rcu_read_unlock();
David Teiglandb3b94fa2006-01-16 16:50:04 +00001824}
1825
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001826static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001827{
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001828 const unsigned long *gflags = &gl->gl_flags;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001829 char *p = buf;
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001830
Steven Whitehouse6802e342008-05-21 17:03:22 +01001831 if (test_bit(GLF_LOCK, gflags))
1832 *p++ = 'l';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001833 if (test_bit(GLF_DEMOTE, gflags))
1834 *p++ = 'D';
1835 if (test_bit(GLF_PENDING_DEMOTE, gflags))
1836 *p++ = 'd';
1837 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1838 *p++ = 'p';
1839 if (test_bit(GLF_DIRTY, gflags))
1840 *p++ = 'y';
1841 if (test_bit(GLF_LFLUSH, gflags))
1842 *p++ = 'f';
1843 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1844 *p++ = 'i';
1845 if (test_bit(GLF_REPLY_PENDING, gflags))
1846 *p++ = 'r';
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001847 if (test_bit(GLF_INITIAL, gflags))
Steven Whitehoused8348de2009-02-05 10:12:38 +00001848 *p++ = 'I';
Steven Whitehousef057f6c2009-01-12 10:43:39 +00001849 if (test_bit(GLF_FROZEN, gflags))
1850 *p++ = 'F';
Steven Whitehouse7b5e3d52010-09-03 09:39:20 +01001851 if (test_bit(GLF_QUEUED, gflags))
1852 *p++ = 'q';
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001853 if (test_bit(GLF_LRU, gflags))
1854 *p++ = 'L';
1855 if (gl->gl_object)
1856 *p++ = 'o';
Steven Whitehousea2457692012-01-20 10:38:36 +00001857 if (test_bit(GLF_BLOCKING, gflags))
1858 *p++ = 'b';
Steven Whitehouse6802e342008-05-21 17:03:22 +01001859 *p = 0;
1860 return buf;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001861}
1862
1863/**
Steven Whitehouse8eae1ca02012-10-15 10:57:02 +01001864 * gfs2_dump_glock - print information about a glock
Steven Whitehouse6802e342008-05-21 17:03:22 +01001865 * @seq: The seq_file struct
David Teiglandb3b94fa2006-01-16 16:50:04 +00001866 * @gl: the glock
Bob Peterson3792ce92019-05-09 09:21:48 -05001867 * @fsid: If true, also dump the file system id
Steven Whitehouse6802e342008-05-21 17:03:22 +01001868 *
1869 * The file format is as follows:
1870 * One line per object, capital letters are used to indicate objects
1871 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1872 * other objects are indented by a single space and follow the glock to
1873 * which they are related. Fields are indicated by lower case letters
1874 * followed by a colon and the field value, except for strings which are in
 1875 * [] so that it's possible to see if they are composed of spaces for
 1876 * example. The fields are n = number (id of the object), f = flags,
1877 * t = type, s = state, r = refcount, e = error, p = pid.
David Teiglandb3b94fa2006-01-16 16:50:04 +00001878 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00001879 */
1880
Bob Peterson3792ce92019-05-09 09:21:48 -05001881void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001882{
Steven Whitehouse6802e342008-05-21 17:03:22 +01001883 const struct gfs2_glock_operations *glops = gl->gl_ops;
1884 unsigned long long dtime;
1885 const struct gfs2_holder *gh;
1886 char gflags_buf[32];
Bob Peterson3792ce92019-05-09 09:21:48 -05001887 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Bob Peterson98fb0572019-08-13 09:25:15 -04001888 char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
David Teiglandb3b94fa2006-01-16 16:50:04 +00001889
Bob Peterson3792ce92019-05-09 09:21:48 -05001890 memset(fs_id_buf, 0, sizeof(fs_id_buf));
1891 if (fsid && sdp) /* safety precaution */
1892 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
Steven Whitehouse6802e342008-05-21 17:03:22 +01001893 dtime = jiffies - gl->gl_demote_time;
1894 dtime *= 1000000/HZ; /* demote time in uSec */
1895 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1896 dtime = 0;
Bob Peterson3792ce92019-05-09 09:21:48 -05001897 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
1898 "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
Steven Whitehouse6802e342008-05-21 17:03:22 +01001899 gl->gl_name.ln_type,
1900 (unsigned long long)gl->gl_name.ln_number,
Steven Whitehouse627c10b2011-04-14 14:09:52 +01001901 gflags2str(gflags_buf, gl),
Steven Whitehouse6802e342008-05-21 17:03:22 +01001902 state2str(gl->gl_target),
1903 state2str(gl->gl_demote_state), dtime,
Steven Whitehouse6802e342008-05-21 17:03:22 +01001904 atomic_read(&gl->gl_ail_count),
Bob Peterson638803d2019-06-06 07:33:38 -05001905 atomic_read(&gl->gl_revokes),
Steven Whitehousee66cf162013-10-15 15:18:08 +01001906 (int)gl->gl_lockref.count, gl->gl_hold_time);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001907
Steven Whitehouseac3beb62014-01-16 10:31:13 +00001908 list_for_each_entry(gh, &gl->gl_holders, gh_list)
Bob Peterson3792ce92019-05-09 09:21:48 -05001909 dump_holder(seq, gh, fs_id_buf);
Steven Whitehouseac3beb62014-01-16 10:31:13 +00001910
Steven Whitehouse6802e342008-05-21 17:03:22 +01001911 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
Bob Peterson3792ce92019-05-09 09:21:48 -05001912 glops->go_dump(seq, gl, fs_id_buf);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001913}
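
/*
 * Illustrative sketch (not part of the original file): with the format used
 * above, a dumped inode glock with one granted holder looks roughly like
 * the following (every value below is made up for the example):
 *
 *   G: s:SH n:2/805f f:q t:SH d:UN/0 a:0 v:0 r:3 m:200
 *    H: s:SH f:H e:0 p:1397 [cat] gfs2_open+0x.../0x...
 */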
1914
Steven Whitehousea2457692012-01-20 10:38:36 +00001915static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1916{
1917 struct gfs2_glock *gl = iter_ptr;
Steven Whitehouse6802e342008-05-21 17:03:22 +01001918
Ben Hutchings4d207132015-08-27 12:51:45 -05001919 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
Steven Whitehousea2457692012-01-20 10:38:36 +00001920 gl->gl_name.ln_type,
1921 (unsigned long long)gl->gl_name.ln_number,
Ben Hutchings4d207132015-08-27 12:51:45 -05001922 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1923 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1924 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1925 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1926 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1927 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1928 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1929 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
Steven Whitehousea2457692012-01-20 10:38:36 +00001930 return 0;
1931}
David Teiglandb3b94fa2006-01-16 16:50:04 +00001932
Steven Whitehousea2457692012-01-20 10:38:36 +00001933static const char *gfs2_gltype[] = {
1934 "type",
1935 "reserved",
1936 "nondisk",
1937 "inode",
1938 "rgrp",
1939 "meta",
1940 "iopen",
1941 "flock",
1942 "plock",
1943 "quota",
1944 "journal",
1945};
1946
1947static const char *gfs2_stype[] = {
1948 [GFS2_LKS_SRTT] = "srtt",
1949 [GFS2_LKS_SRTTVAR] = "srttvar",
1950 [GFS2_LKS_SRTTB] = "srttb",
1951 [GFS2_LKS_SRTTVARB] = "srttvarb",
1952 [GFS2_LKS_SIRT] = "sirt",
1953 [GFS2_LKS_SIRTVAR] = "sirtvar",
1954 [GFS2_LKS_DCOUNT] = "dlm",
1955 [GFS2_LKS_QCOUNT] = "queue",
1956};
1957
1958#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1959
1960static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1961{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05001962 struct gfs2_sbd *sdp = seq->private;
1963 loff_t pos = *(loff_t *)iter_ptr;
1964 unsigned index = pos >> 3;
1965 unsigned subindex = pos & 0x07;
Steven Whitehousea2457692012-01-20 10:38:36 +00001966 int i;
1967
1968 if (index == 0 && subindex != 0)
1969 return 0;
1970
1971 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1972 (index == 0) ? "cpu": gfs2_stype[subindex]);
1973
1974 for_each_possible_cpu(i) {
1975 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
Andreas Gruenbacher8f7e0a82015-08-27 13:02:54 -05001976
1977 if (index == 0)
1978 seq_printf(seq, " %15u", i);
1979 else
1980 seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1981 lkstats[index - 1].stats[subindex]);
Steven Whitehousea2457692012-01-20 10:38:36 +00001982 }
1983 seq_putc(seq, '\n');
1984 return 0;
1985}
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01001986
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001987int __init gfs2_glock_init(void)
1988{
Andreas Gruenbacher05154802017-08-01 11:18:26 -05001989 int i, ret;
Bob Peterson88ffbf32015-03-16 11:02:46 -05001990
1991 ret = rhashtable_init(&gl_hash_table, &ht_parms);
1992 if (ret < 0)
1993 return ret;
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01001994
Steven Whitehoused2115772010-11-03 19:58:53 +00001995 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
Tejun Heo58a69cb2011-02-16 09:25:31 +01001996 WQ_HIGHPRI | WQ_FREEZABLE, 0);
Bob Peterson88ffbf32015-03-16 11:02:46 -05001997 if (!glock_workqueue) {
1998 rhashtable_destroy(&gl_hash_table);
Dan Carpenterdfc46162013-08-15 10:54:43 +03001999 return -ENOMEM;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002000 }
Steven Whitehoused2115772010-11-03 19:58:53 +00002001 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
Tejun Heo58a69cb2011-02-16 09:25:31 +01002002 WQ_MEM_RECLAIM | WQ_FREEZABLE,
Steven Whitehoused2115772010-11-03 19:58:53 +00002003 0);
Dan Carpenterdfc46162013-08-15 10:54:43 +03002004 if (!gfs2_delete_workqueue) {
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002005 destroy_workqueue(glock_workqueue);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002006 rhashtable_destroy(&gl_hash_table);
Dan Carpenterdfc46162013-08-15 10:54:43 +03002007 return -ENOMEM;
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002008 }
Steven Whitehouse97cc10252008-11-20 13:39:47 +00002009
Chao Yue0d735c2016-09-21 12:09:40 -05002010 ret = register_shrinker(&glock_shrinker);
2011 if (ret) {
2012 destroy_workqueue(gfs2_delete_workqueue);
2013 destroy_workqueue(glock_workqueue);
2014 rhashtable_destroy(&gl_hash_table);
2015 return ret;
2016 }
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05002017
Andreas Gruenbacher05154802017-08-01 11:18:26 -05002018 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
2019 init_waitqueue_head(glock_wait_table + i);
2020
Steven Whitehouse85d1da62006-09-07 14:40:21 -04002021 return 0;
2022}
2023
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01002024void gfs2_glock_exit(void)
2025{
Steven Whitehouse97cc10252008-11-20 13:39:47 +00002026 unregister_shrinker(&glock_shrinker);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002027 rhashtable_destroy(&gl_hash_table);
Benjamin Marzinskic4f68a12007-08-23 13:19:05 -05002028 destroy_workqueue(glock_workqueue);
Benjamin Marzinskib94a1702009-07-23 18:52:34 -05002029 destroy_workqueue(gfs2_delete_workqueue);
Steven Whitehouse8fbbfd22007-08-01 13:57:10 +01002030}
2031
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002032static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002033{
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002034 struct gfs2_glock *gl = gi->gl;
2035
2036 if (gl) {
2037 if (n == 0)
2038 return;
2039 if (!lockref_put_not_zero(&gl->gl_lockref))
2040 gfs2_glock_queue_put(gl);
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002041 }
2042 for (;;) {
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002043 gl = rhashtable_walk_next(&gi->hti);
2044 if (IS_ERR_OR_NULL(gl)) {
2045 if (gl == ERR_PTR(-EAGAIN)) {
2046 n = 1;
2047 continue;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002048 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002049 gl = NULL;
2050 break;
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002051 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002052 if (gl->gl_name.ln_sbd != gi->sdp)
2053 continue;
2054 if (n <= 1) {
2055 if (!lockref_get_not_dead(&gl->gl_lockref))
2056 continue;
2057 break;
2058 } else {
2059 if (__lockref_is_dead(&gl->gl_lockref))
2060 continue;
2061 n--;
2062 }
Dan Carpenter14d37562016-12-14 08:02:03 -06002063 }
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002064 gi->gl = gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002065}
2066
Steven Whitehouse6802e342008-05-21 17:03:22 +01002067static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
Bob Peterson27c3b412017-08-18 09:15:13 -05002068 __acquires(RCU)
Robert Peterson7c52b162007-03-16 10:26:37 +00002069{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002070 struct gfs2_glock_iter *gi = seq->private;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002071 loff_t n;
Robert Peterson7c52b162007-03-16 10:26:37 +00002072
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002073 /*
2074 * We can either stay where we are, skip to the next hash table
2075 * entry, or start from the beginning.
2076 */
2077 if (*pos < gi->last_pos) {
2078 rhashtable_walk_exit(&gi->hti);
2079 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2080 n = *pos + 1;
2081 } else {
2082 n = *pos - gi->last_pos;
2083 }
Robert Peterson7c52b162007-03-16 10:26:37 +00002084
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002085 rhashtable_walk_start(&gi->hti);
Robert Peterson7c52b162007-03-16 10:26:37 +00002086
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002087 gfs2_glock_iter_next(gi, n);
Steven Whitehouseba1ddcb2012-06-08 11:16:22 +01002088 gi->last_pos = *pos;
Steven Whitehouse6802e342008-05-21 17:03:22 +01002089 return gi->gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002090}
2091
Steven Whitehouse6802e342008-05-21 17:03:22 +01002092static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
Robert Peterson7c52b162007-03-16 10:26:37 +00002093 loff_t *pos)
2094{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002095 struct gfs2_glock_iter *gi = seq->private;
Robert Peterson7c52b162007-03-16 10:26:37 +00002096
2097 (*pos)++;
Steven Whitehouseba1ddcb2012-06-08 11:16:22 +01002098 gi->last_pos = *pos;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002099 gfs2_glock_iter_next(gi, 1);
Steven Whitehouse6802e342008-05-21 17:03:22 +01002100 return gi->gl;
Robert Peterson7c52b162007-03-16 10:26:37 +00002101}
2102
Steven Whitehouse6802e342008-05-21 17:03:22 +01002103static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
Bob Peterson27c3b412017-08-18 09:15:13 -05002104 __releases(RCU)
Robert Peterson7c52b162007-03-16 10:26:37 +00002105{
Steven Whitehouse6802e342008-05-21 17:03:22 +01002106 struct gfs2_glock_iter *gi = seq->private;
Steven Whitehousebc015cb2011-01-19 09:30:01 +00002107
Bob Peterson88ffbf32015-03-16 11:02:46 -05002108 rhashtable_walk_stop(&gi->hti);
Robert Peterson7c52b162007-03-16 10:26:37 +00002109}
2110
Steven Whitehouse6802e342008-05-21 17:03:22 +01002111static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
Robert Peterson7c52b162007-03-16 10:26:37 +00002112{
Bob Peterson3792ce92019-05-09 09:21:48 -05002113 dump_glock(seq, iter_ptr, false);
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002114 return 0;
Robert Peterson7c52b162007-03-16 10:26:37 +00002115}
2116
Steven Whitehousea2457692012-01-20 10:38:36 +00002117static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2118{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002119 preempt_disable();
Steven Whitehousea2457692012-01-20 10:38:36 +00002120 if (*pos >= GFS2_NR_SBSTATS)
2121 return NULL;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002122 return pos;
Steven Whitehousea2457692012-01-20 10:38:36 +00002123}
2124
2125static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2126 loff_t *pos)
2127{
Steven Whitehousea2457692012-01-20 10:38:36 +00002128 (*pos)++;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002129 if (*pos >= GFS2_NR_SBSTATS)
Steven Whitehousea2457692012-01-20 10:38:36 +00002130 return NULL;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002131 return pos;
Steven Whitehousea2457692012-01-20 10:38:36 +00002132}
2133
2134static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2135{
2136 preempt_enable();
2137}
2138
Denis Cheng4ef29002007-07-31 18:31:11 +08002139static const struct seq_operations gfs2_glock_seq_ops = {
Robert Peterson7c52b162007-03-16 10:26:37 +00002140 .start = gfs2_glock_seq_start,
2141 .next = gfs2_glock_seq_next,
2142 .stop = gfs2_glock_seq_stop,
2143 .show = gfs2_glock_seq_show,
2144};
2145
Steven Whitehousea2457692012-01-20 10:38:36 +00002146static const struct seq_operations gfs2_glstats_seq_ops = {
2147 .start = gfs2_glock_seq_start,
2148 .next = gfs2_glock_seq_next,
2149 .stop = gfs2_glock_seq_stop,
2150 .show = gfs2_glstats_seq_show,
2151};
2152
2153static const struct seq_operations gfs2_sbstats_seq_ops = {
2154 .start = gfs2_sbstats_seq_start,
2155 .next = gfs2_sbstats_seq_next,
2156 .stop = gfs2_sbstats_seq_stop,
2157 .show = gfs2_sbstats_seq_show,
2158};
2159
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002160#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2161
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002162static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2163 const struct seq_operations *ops)
Robert Peterson7c52b162007-03-16 10:26:37 +00002164{
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002165 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
Steven Whitehouse6802e342008-05-21 17:03:22 +01002166 if (ret == 0) {
2167 struct seq_file *seq = file->private_data;
2168 struct gfs2_glock_iter *gi = seq->private;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002169
Steven Whitehouse6802e342008-05-21 17:03:22 +01002170 gi->sdp = inode->i_private;
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002171 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
Steven Whitehousedf5d2f52012-06-07 13:30:16 +01002172 if (seq->buf)
Steven Whitehouse0fe2f1e2012-06-11 13:49:47 +01002173 seq->size = GFS2_SEQ_GOODSIZE;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002174 /*
2175 * Initially, we are "before" the first hash table entry; the
2176 * first call to rhashtable_walk_next gets us the first entry.
2177 */
2178 gi->last_pos = -1;
Bob Peterson88ffbf32015-03-16 11:02:46 -05002179 gi->gl = NULL;
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002180 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
Steven Whitehouse6802e342008-05-21 17:03:22 +01002181 }
2182 return ret;
Robert Peterson7c52b162007-03-16 10:26:37 +00002183}
2184
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002185static int gfs2_glocks_open(struct inode *inode, struct file *file)
2186{
2187 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2188}
2189
Bob Peterson88ffbf32015-03-16 11:02:46 -05002190static int gfs2_glocks_release(struct inode *inode, struct file *file)
2191{
2192 struct seq_file *seq = file->private_data;
2193 struct gfs2_glock_iter *gi = seq->private;
2194
Andreas Gruenbacher3fd5d3a2018-03-28 12:05:35 +02002195 if (gi->gl)
2196 gfs2_glock_put(gi->gl);
Andreas Gruenbacher7ac07fd2018-01-08 22:35:43 +01002197 rhashtable_walk_exit(&gi->hti);
Bob Peterson88ffbf32015-03-16 11:02:46 -05002198 return seq_release_private(inode, file);
2199}
2200
Steven Whitehousea2457692012-01-20 10:38:36 +00002201static int gfs2_glstats_open(struct inode *inode, struct file *file)
2202{
Andreas Gruenbacher92ecd732017-03-09 09:48:05 -05002203 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002204}
2205
2206static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2207{
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002208 int ret = seq_open(file, &gfs2_sbstats_seq_ops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002209 if (ret == 0) {
2210 struct seq_file *seq = file->private_data;
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002211 seq->private = inode->i_private; /* sdp */
Steven Whitehousea2457692012-01-20 10:38:36 +00002212 }
2213 return ret;
2214}
2215
2216static const struct file_operations gfs2_glocks_fops = {
Robert Peterson7c52b162007-03-16 10:26:37 +00002217 .owner = THIS_MODULE,
Steven Whitehousea2457692012-01-20 10:38:36 +00002218 .open = gfs2_glocks_open,
2219 .read = seq_read,
2220 .llseek = seq_lseek,
Bob Peterson88ffbf32015-03-16 11:02:46 -05002221 .release = gfs2_glocks_release,
Steven Whitehousea2457692012-01-20 10:38:36 +00002222};
2223
2224static const struct file_operations gfs2_glstats_fops = {
2225 .owner = THIS_MODULE,
2226 .open = gfs2_glstats_open,
2227 .read = seq_read,
2228 .llseek = seq_lseek,
Bob Peterson88ffbf32015-03-16 11:02:46 -05002229 .release = gfs2_glocks_release,
Steven Whitehousea2457692012-01-20 10:38:36 +00002230};
2231
2232static const struct file_operations gfs2_sbstats_fops = {
2233 .owner = THIS_MODULE,
2234 .open = gfs2_sbstats_open,
Robert Peterson7c52b162007-03-16 10:26:37 +00002235 .read = seq_read,
2236 .llseek = seq_lseek,
Andreas Gruenbacher81648d02015-08-27 11:43:00 -05002237 .release = seq_release,
Robert Peterson7c52b162007-03-16 10:26:37 +00002238};
2239
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002240void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
Robert Peterson7c52b162007-03-16 10:26:37 +00002241{
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002242 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
Steven Whitehousea2457692012-01-20 10:38:36 +00002243
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002244 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2245 &gfs2_glocks_fops);
Steven Whitehousea2457692012-01-20 10:38:36 +00002246
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002247 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2248 &gfs2_glstats_fops);
Chengyu Song7b4ddfa2015-03-24 09:37:53 -05002249
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002250 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2251 &gfs2_sbstats_fops);
Robert Peterson7c52b162007-03-16 10:26:37 +00002252}
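
/*
 * Note added for clarity (not in the original file): with debugfs mounted in
 * its usual location, the three files created above show up as
 * /sys/kernel/debug/gfs2/<locktable>/{glocks,glstats,sbstats}, where
 * <locktable> is the sdp->sd_table_name passed to debugfs_create_dir().
 */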
2253
2254void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2255{
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002256 debugfs_remove_recursive(sdp->debugfs_dir);
2257 sdp->debugfs_dir = NULL;
Robert Peterson7c52b162007-03-16 10:26:37 +00002258}
2259
Greg Kroah-Hartman2abbf9a2019-01-22 16:21:51 +01002260void gfs2_register_debugfs(void)
Robert Peterson7c52b162007-03-16 10:26:37 +00002261{
2262 gfs2_root = debugfs_create_dir("gfs2", NULL);
Robert Peterson7c52b162007-03-16 10:26:37 +00002263}
2264
2265void gfs2_unregister_debugfs(void)
2266{
2267 debugfs_remove(gfs2_root);
Robert Peterson5f882092007-04-18 11:41:11 -05002268 gfs2_root = NULL;
Robert Peterson7c52b162007-03-16 10:26:37 +00002269}