David Teiglandb3b94fa2006-01-16 16:50:04 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Steven Whitehouse3a8a9a12006-05-18 15:09:15 -04003 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00004 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teiglandb3b94fa2006-01-16 16:50:04 +00008 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/sort.h>
17#include <linux/jhash.h>
Steven Whitehoused0dc80d2006-03-29 14:36:49 -050018#include <linux/kallsyms.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050019#include <linux/gfs2_ondisk.h>
Steven Whitehouse24264432006-09-11 21:40:30 -040020#include <linux/list.h>
Fabio Massimo Di Nitto7d308592006-09-19 07:56:29 +020021#include <linux/lm_interface.h>
Steven Whitehousefee852e2007-01-17 15:33:23 +000022#include <linux/wait.h>
akpm@linux-foundation.org95d97b72007-03-05 23:10:39 -080023#include <linux/module.h>
Steven Whitehouse61be0842007-01-29 11:51:45 +000024#include <linux/rwsem.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000025#include <asm/uaccess.h>
Robert Peterson7c52b162007-03-16 10:26:37 +000026#include <linux/seq_file.h>
27#include <linux/debugfs.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000030
31#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050032#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000033#include "glock.h"
34#include "glops.h"
35#include "inode.h"
36#include "lm.h"
37#include "lops.h"
38#include "meta_io.h"
39#include "quota.h"
40#include "super.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050041#include "util.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000042
Steven Whitehouse37b2fa62006-09-08 13:35:56 -040043struct gfs2_gl_hash_bucket {
Steven Whitehouseb6397892006-09-12 10:10:01 -040044 struct hlist_head hb_list;
Steven Whitehouse37b2fa62006-09-08 13:35:56 -040045};
46
Robert Peterson7c52b162007-03-16 10:26:37 +000047struct glock_iter {
48 int hash; /* hash bucket index */
49 struct gfs2_sbd *sdp; /* incore superblock */
50 struct gfs2_glock *gl; /* current glock struct */
51 struct hlist_head *hb_list; /* current hash bucket ptr */
52 struct seq_file *seq; /* sequence file for debugfs */
53 char string[512]; /* scratch space */
54};
55
David Teiglandb3b94fa2006-01-16 16:50:04 +000056typedef void (*glock_examiner) (struct gfs2_glock * gl);
57
Adrian Bunk08bc2db2006-04-28 10:59:12 -040058static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
Robert Peterson04b933f2007-03-23 17:05:15 -050059static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +000060static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
Steven Whitehouseb5d32be2007-01-22 12:15:34 -050061static void gfs2_glock_drop_th(struct gfs2_glock *gl);
Steven Whitehouse61be0842007-01-29 11:51:45 +000062static DECLARE_RWSEM(gfs2_umount_flush_sem);
Robert Peterson7c52b162007-03-16 10:26:37 +000063static struct dentry *gfs2_root;
Adrian Bunk08bc2db2006-04-28 10:59:12 -040064
Steven Whitehouseb6397892006-09-12 10:10:01 -040065#define GFS2_GL_HASH_SHIFT 15
Steven Whitehouse087efdd2006-09-09 16:59:11 -040066#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
67#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
68
Steven Whitehouse85d1da62006-09-07 14:40:21 -040069static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
Steven Whitehouse087efdd2006-09-09 16:59:11 -040071
72/*
73 * Despite what you might think, the numbers below are not arbitrary :-)
74 * They are taken from the ipv4 routing hash code, which is well tested
 75 * and thus should be nearly optimal. Later on we might tweak the numbers
76 * but for now this should be fine.
77 *
78 * The reason for putting the locks in a separate array from the list heads
79 * is that we can have fewer locks than list heads and save memory. We use
80 * the same hash function for both, but with a different hash mask.
81 */
82#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
83 defined(CONFIG_PROVE_LOCKING)
84
85#ifdef CONFIG_LOCKDEP
86# define GL_HASH_LOCK_SZ 256
87#else
88# if NR_CPUS >= 32
89# define GL_HASH_LOCK_SZ 4096
90# elif NR_CPUS >= 16
91# define GL_HASH_LOCK_SZ 2048
92# elif NR_CPUS >= 8
93# define GL_HASH_LOCK_SZ 1024
94# elif NR_CPUS >= 4
95# define GL_HASH_LOCK_SZ 512
96# else
97# define GL_HASH_LOCK_SZ 256
98# endif
99#endif
100
101/* We never want more locks than chains */
102#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
103# undef GL_HASH_LOCK_SZ
104# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
105#endif
106
107static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
108
109static inline rwlock_t *gl_lock_addr(unsigned int x)
110{
Steven Whitehouse94610612006-09-09 18:59:27 -0400111 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400112}
113#else /* not SMP, so no spinlocks required */
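/*
 * Note: in this configuration gl_lock_addr() returns NULL. That is safe
 * because read_lock()/write_lock() compile to no-ops on uniprocessor
 * builds without spinlock debugging, so the pointer is never dereferenced.
 */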
Randy Dunlap0ac23062006-11-28 22:29:19 -0800114static inline rwlock_t *gl_lock_addr(unsigned int x)
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400115{
116 return NULL;
117}
118#endif
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400119
David Teiglandb3b94fa2006-01-16 16:50:04 +0000120/**
121 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
122 * @actual: the current state of the lock
123 * @requested: the lock state that was requested by the caller
124 * @flags: the modifier flags passed in by the caller
125 *
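 * For example, a request for LM_ST_SHARED is satisfied by an existing
 * LM_ST_EXCLUSIVE lock unless the caller passed GL_EXACT, and any state
 * other than LM_ST_UNLOCKED satisfies a request with LM_FLAG_ANY set.
 *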
126 * Returns: 1 if the locks are compatible, 0 otherwise
127 */
128
129static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
130 int flags)
131{
132 if (actual == requested)
133 return 1;
134
135 if (flags & GL_EXACT)
136 return 0;
137
138 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
139 return 1;
140
141 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
142 return 1;
143
144 return 0;
145}
146
147/**
148 * gl_hash() - Turn glock number into hash bucket number
 149 * @name: The lock name
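 * @sdp: The GFS2 superblock; its pointer value is mixed into the hash so
 * that locks with the same number on different filesystems tend to land
 * in different buckets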
150 *
151 * Returns: The number of the corresponding hash bucket
152 */
153
Steven Whitehouseb8547852006-09-07 13:12:27 -0400154static unsigned int gl_hash(const struct gfs2_sbd *sdp,
155 const struct lm_lockname *name)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000156{
157 unsigned int h;
158
Steven Whitehousecd915492006-09-04 12:49:07 -0400159 h = jhash(&name->ln_number, sizeof(u64), 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000160 h = jhash(&name->ln_type, sizeof(unsigned int), h);
Steven Whitehouseb8547852006-09-07 13:12:27 -0400161 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000162 h &= GFS2_GL_HASH_MASK;
163
164 return h;
165}
166
167/**
168 * glock_free() - Perform a few checks and then release struct gfs2_glock
169 * @gl: The glock to release
170 *
 171 * Also calls the lock module to release its internal structure for this glock.
172 *
173 */
174
175static void glock_free(struct gfs2_glock *gl)
176{
177 struct gfs2_sbd *sdp = gl->gl_sbd;
178 struct inode *aspace = gl->gl_aspace;
179
180 gfs2_lm_put_lock(sdp, gl->gl_lock);
181
182 if (aspace)
183 gfs2_aspace_put(aspace);
184
185 kmem_cache_free(gfs2_glock_cachep, gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000186}
187
188/**
189 * gfs2_glock_hold() - increment reference count on glock
190 * @gl: The glock to hold
191 *
192 */
193
194void gfs2_glock_hold(struct gfs2_glock *gl)
195{
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400196 atomic_inc(&gl->gl_ref);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000197}
198
199/**
200 * gfs2_glock_put() - Decrement reference count on glock
201 * @gl: The glock to put
202 *
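 * Returns: 1 if the glock was freed, 0 if it is still referenced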
203 */
204
205int gfs2_glock_put(struct gfs2_glock *gl)
206{
David Teiglandb3b94fa2006-01-16 16:50:04 +0000207 int rv = 0;
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400208 struct gfs2_sbd *sdp = gl->gl_sbd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000209
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400210 write_lock(gl_lock_addr(gl->gl_hash));
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400211 if (atomic_dec_and_test(&gl->gl_ref)) {
Steven Whitehouseb6397892006-09-12 10:10:01 -0400212 hlist_del(&gl->gl_list);
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400213 write_unlock(gl_lock_addr(gl->gl_hash));
Steven Whitehouse190562b2006-04-20 16:57:23 -0400214 BUG_ON(spin_is_locked(&gl->gl_spin));
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400215 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
216 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
217 gfs2_assert(sdp, list_empty(&gl->gl_holders));
218 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400219 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000220 glock_free(gl);
221 rv = 1;
222 goto out;
223 }
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400224 write_unlock(gl_lock_addr(gl->gl_hash));
Steven Whitehousea2242db2006-08-24 17:03:05 -0400225out:
David Teiglandb3b94fa2006-01-16 16:50:04 +0000226 return rv;
227}
228
229/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000230 * search_bucket() - Find struct gfs2_glock by lock number
231 * @bucket: the bucket to search
232 * @name: The lock name
233 *
234 * Returns: NULL, or the struct gfs2_glock with the requested number
235 */
236
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400237static struct gfs2_glock *search_bucket(unsigned int hash,
Steven Whitehouse899be4d2006-08-30 12:50:28 -0400238 const struct gfs2_sbd *sdp,
Steven Whitehoused6a53722006-08-30 11:16:23 -0400239 const struct lm_lockname *name)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000240{
241 struct gfs2_glock *gl;
Steven Whitehouseb6397892006-09-12 10:10:01 -0400242 struct hlist_node *h;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000243
Steven Whitehouseb6397892006-09-12 10:10:01 -0400244 hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000245 if (!lm_name_equal(&gl->gl_name, name))
246 continue;
Steven Whitehouse899be4d2006-08-30 12:50:28 -0400247 if (gl->gl_sbd != sdp)
248 continue;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000249
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400250 atomic_inc(&gl->gl_ref);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000251
252 return gl;
253 }
254
255 return NULL;
256}
257
258/**
259 * gfs2_glock_find() - Find glock by lock number
260 * @sdp: The GFS2 superblock
261 * @name: The lock name
262 *
263 * Returns: NULL, or the struct gfs2_glock with the requested number
264 */
265
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400266static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
Steven Whitehoused6a53722006-08-30 11:16:23 -0400267 const struct lm_lockname *name)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000268{
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400269 unsigned int hash = gl_hash(sdp, name);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000270 struct gfs2_glock *gl;
271
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400272 read_lock(gl_lock_addr(hash));
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400273 gl = search_bucket(hash, sdp, name);
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400274 read_unlock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000275
276 return gl;
277}
278
279/**
280 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
281 * @sdp: The GFS2 superblock
282 * @number: the lock number
283 * @glops: The glock_operations to use
284 * @create: If 0, don't create the glock if it doesn't exist
285 * @glp: the glock is returned here
286 *
287 * This does not lock a glock, just finds/creates structures for one.
288 *
289 * Returns: errno
290 */
291
Steven Whitehousecd915492006-09-04 12:49:07 -0400292int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400293 const struct gfs2_glock_operations *glops, int create,
David Teiglandb3b94fa2006-01-16 16:50:04 +0000294 struct gfs2_glock **glp)
295{
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400296 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
David Teiglandb3b94fa2006-01-16 16:50:04 +0000297 struct gfs2_glock *gl, *tmp;
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400298 unsigned int hash = gl_hash(sdp, &name);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000299 int error;
300
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400301 read_lock(gl_lock_addr(hash));
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400302 gl = search_bucket(hash, sdp, &name);
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400303 read_unlock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000304
305 if (gl || !create) {
306 *glp = gl;
307 return 0;
308 }
309
310 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
311 if (!gl)
312 return -ENOMEM;
313
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400314 gl->gl_flags = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000315 gl->gl_name = name;
Steven Whitehouse16feb9f2006-09-13 10:43:37 -0400316 atomic_set(&gl->gl_ref, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000317 gl->gl_state = LM_ST_UNLOCKED;
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400318 gl->gl_hash = hash;
Robert Peterson04b933f2007-03-23 17:05:15 -0500319 gl->gl_owner_pid = 0;
Steven Whitehouse320dd102006-05-18 16:25:27 -0400320 gl->gl_ip = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000321 gl->gl_ops = glops;
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400322 gl->gl_req_gh = NULL;
323 gl->gl_req_bh = NULL;
324 gl->gl_vn = 0;
325 gl->gl_stamp = jiffies;
326 gl->gl_object = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000327 gl->gl_sbd = sdp;
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400328 gl->gl_aspace = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000329 lops_init_le(&gl->gl_le, &gfs2_glock_lops);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000330
331 /* If this glock protects actual on-disk data or metadata blocks,
332 create a VFS inode to manage the pages/buffers holding them. */
Steven Whitehouse50299962006-09-04 09:49:55 -0400333 if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000334 gl->gl_aspace = gfs2_aspace_get(sdp);
335 if (!gl->gl_aspace) {
336 error = -ENOMEM;
337 goto fail;
338 }
339 }
340
341 error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
342 if (error)
343 goto fail_aspace;
344
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400345 write_lock(gl_lock_addr(hash));
Steven Whitehouse37b2fa62006-09-08 13:35:56 -0400346 tmp = search_bucket(hash, sdp, &name);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000347 if (tmp) {
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400348 write_unlock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000349 glock_free(gl);
350 gl = tmp;
351 } else {
Steven Whitehouseb6397892006-09-12 10:10:01 -0400352 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
Steven Whitehouse087efdd2006-09-09 16:59:11 -0400353 write_unlock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000354 }
355
356 *glp = gl;
357
358 return 0;
359
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400360fail_aspace:
David Teiglandb3b94fa2006-01-16 16:50:04 +0000361 if (gl->gl_aspace)
362 gfs2_aspace_put(gl->gl_aspace);
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400363fail:
Steven Whitehouse907b9bc2006-09-25 09:26:04 -0400364 kmem_cache_free(gfs2_glock_cachep, gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000365 return error;
366}
367
368/**
369 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
370 * @gl: the glock
371 * @state: the state we're requesting
372 * @flags: the modifier flags
373 * @gh: the holder structure
374 *
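 * A typical calling pattern (sketch; see also the gfs2_glock_nq_init()
 * helper used elsewhere in this file):
 *
 *   gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *   error = gfs2_glock_nq(&gh);
 *   if (!error)
 *           gfs2_glock_dq(&gh);
 *   gfs2_holder_uninit(&gh);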
375 */
376
Steven Whitehouse190562b2006-04-20 16:57:23 -0400377void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
David Teiglandb3b94fa2006-01-16 16:50:04 +0000378 struct gfs2_holder *gh)
379{
380 INIT_LIST_HEAD(&gh->gh_list);
381 gh->gh_gl = gl;
Steven Whitehoused0dc80d2006-03-29 14:36:49 -0500382 gh->gh_ip = (unsigned long)__builtin_return_address(0);
Robert Peterson04b933f2007-03-23 17:05:15 -0500383 gh->gh_owner_pid = current->pid;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000384 gh->gh_state = state;
385 gh->gh_flags = flags;
386 gh->gh_error = 0;
387 gh->gh_iflags = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000388 gfs2_glock_hold(gl);
389}
390
391/**
392 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
393 * @state: the state we're requesting
394 * @flags: the modifier flags
395 * @gh: the holder structure
396 *
397 * Don't mess with the glock.
398 *
399 */
400
Steven Whitehouse190562b2006-04-20 16:57:23 -0400401void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000402{
403 gh->gh_state = state;
Steven Whitehouse579b78a2006-04-26 14:58:26 -0400404 gh->gh_flags = flags;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000405 gh->gh_iflags = 0;
Steven Whitehoused0dc80d2006-03-29 14:36:49 -0500406 gh->gh_ip = (unsigned long)__builtin_return_address(0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000407}
408
409/**
410 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
411 * @gh: the holder structure
412 *
413 */
414
415void gfs2_holder_uninit(struct gfs2_holder *gh)
416{
417 gfs2_glock_put(gh->gh_gl);
418 gh->gh_gl = NULL;
Steven Whitehoused0dc80d2006-03-29 14:36:49 -0500419 gh->gh_ip = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000420}
421
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000422static void gfs2_holder_wake(struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000423{
Steven Whitehousefee852e2007-01-17 15:33:23 +0000424 clear_bit(HIF_WAIT, &gh->gh_iflags);
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100425 smp_mb__after_clear_bit();
Steven Whitehousefee852e2007-01-17 15:33:23 +0000426 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
427}
428
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100429static int just_schedule(void *word)
Steven Whitehousefee852e2007-01-17 15:33:23 +0000430{
431 schedule();
432 return 0;
433}
434
435static void wait_on_holder(struct gfs2_holder *gh)
436{
437 might_sleep();
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100438 wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
439}
440
441static void gfs2_demote_wake(struct gfs2_glock *gl)
442{
443 clear_bit(GLF_DEMOTE, &gl->gl_flags);
444 smp_mb__after_clear_bit();
445 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
446}
447
448static void wait_on_demote(struct gfs2_glock *gl)
449{
450 might_sleep();
451 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
Steven Whitehousefee852e2007-01-17 15:33:23 +0000452}
453
David Teiglandb3b94fa2006-01-16 16:50:04 +0000454/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000455 * rq_mutex - process a mutex request in the queue
456 * @gh: the glock holder
457 *
458 * Returns: 1 if the queue is blocked
459 */
460
461static int rq_mutex(struct gfs2_holder *gh)
462{
463 struct gfs2_glock *gl = gh->gh_gl;
464
465 list_del_init(&gh->gh_list);
466 /* gh->gh_error never examined. */
467 set_bit(GLF_LOCK, &gl->gl_flags);
Steven Whitehoused043e192007-01-23 16:56:36 -0500468 clear_bit(HIF_WAIT, &gh->gh_iflags);
Steven Whitehousefee852e2007-01-17 15:33:23 +0000469 smp_mb();
470 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000471
472 return 1;
473}
474
475/**
476 * rq_promote - process a promote request in the queue
477 * @gh: the glock holder
478 *
 479 * Acquire a new inter-node lock, or change a lock state to a more restrictive one.
480 *
481 * Returns: 1 if the queue is blocked
482 */
483
484static int rq_promote(struct gfs2_holder *gh)
485{
486 struct gfs2_glock *gl = gh->gh_gl;
487 struct gfs2_sbd *sdp = gl->gl_sbd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000488
489 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
490 if (list_empty(&gl->gl_holders)) {
491 gl->gl_req_gh = gh;
492 set_bit(GLF_LOCK, &gl->gl_flags);
493 spin_unlock(&gl->gl_spin);
494
495 if (atomic_read(&sdp->sd_reclaim_count) >
496 gfs2_tune_get(sdp, gt_reclaim_limit) &&
497 !(gh->gh_flags & LM_FLAG_PRIORITY)) {
498 gfs2_reclaim_glock(sdp);
499 gfs2_reclaim_glock(sdp);
500 }
501
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000502 gfs2_glock_xmote_th(gh->gh_gl, gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000503 spin_lock(&gl->gl_spin);
504 }
505 return 1;
506 }
507
508 if (list_empty(&gl->gl_holders)) {
509 set_bit(HIF_FIRST, &gh->gh_iflags);
510 set_bit(GLF_LOCK, &gl->gl_flags);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000511 } else {
512 struct gfs2_holder *next_gh;
Steven Whitehouse1c0f4872007-01-22 12:10:39 -0500513 if (gh->gh_state == LM_ST_EXCLUSIVE)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000514 return 1;
515 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
516 gh_list);
Steven Whitehouse1c0f4872007-01-22 12:10:39 -0500517 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000518 return 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000519 }
520
521 list_move_tail(&gh->gh_list, &gl->gl_holders);
522 gh->gh_error = 0;
523 set_bit(HIF_HOLDER, &gh->gh_iflags);
524
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000525 gfs2_holder_wake(gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000526
527 return 0;
528}
529
530/**
531 * rq_demote - process a demote request in the queue
 532 * @gl: the glock
533 *
534 * Returns: 1 if the queue is blocked
535 */
536
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000537static int rq_demote(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000538{
David Teiglandb3b94fa2006-01-16 16:50:04 +0000539 if (!list_empty(&gl->gl_holders))
540 return 1;
541
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000542 if (gl->gl_state == gl->gl_demote_state ||
543 gl->gl_state == LM_ST_UNLOCKED) {
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100544 gfs2_demote_wake(gl);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000545 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000546 }
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000547 set_bit(GLF_LOCK, &gl->gl_flags);
548 spin_unlock(&gl->gl_spin);
549 if (gl->gl_demote_state == LM_ST_UNLOCKED ||
550 gl->gl_state != LM_ST_EXCLUSIVE)
551 gfs2_glock_drop_th(gl);
552 else
553 gfs2_glock_xmote_th(gl, NULL);
554 spin_lock(&gl->gl_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000555
556 return 0;
557}
558
559/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000560 * run_queue - process holder structures on a glock
561 * @gl: the glock
562 *
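 * Works through, in order: glmutex requests on gl_waiters1, any pending
 * demote (GLF_DEMOTE), and promote requests on gl_waiters3, stopping as
 * soon as one of them blocks.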
563 */
David Teiglandb3b94fa2006-01-16 16:50:04 +0000564static void run_queue(struct gfs2_glock *gl)
565{
566 struct gfs2_holder *gh;
567 int blocked = 1;
568
569 for (;;) {
570 if (test_bit(GLF_LOCK, &gl->gl_flags))
571 break;
572
573 if (!list_empty(&gl->gl_waiters1)) {
574 gh = list_entry(gl->gl_waiters1.next,
575 struct gfs2_holder, gh_list);
576
577 if (test_bit(HIF_MUTEX, &gh->gh_iflags))
578 blocked = rq_mutex(gh);
579 else
580 gfs2_assert_warn(gl->gl_sbd, 0);
581
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000582 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
583 blocked = rq_demote(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000584 } else if (!list_empty(&gl->gl_waiters3)) {
585 gh = list_entry(gl->gl_waiters3.next,
586 struct gfs2_holder, gh_list);
587
588 if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
589 blocked = rq_promote(gh);
590 else
591 gfs2_assert_warn(gl->gl_sbd, 0);
592
593 } else
594 break;
595
596 if (blocked)
597 break;
598 }
599}
600
601/**
602 * gfs2_glmutex_lock - acquire a local lock on a glock
603 * @gl: the glock
604 *
605 * Gives caller exclusive access to manipulate a glock structure.
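 * Release it again with gfs2_glmutex_unlock().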
606 */
607
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400608static void gfs2_glmutex_lock(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000609{
610 struct gfs2_holder gh;
611
612 gfs2_holder_init(gl, 0, 0, &gh);
613 set_bit(HIF_MUTEX, &gh.gh_iflags);
Steven Whitehousefee852e2007-01-17 15:33:23 +0000614 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
615 BUG();
David Teiglandb3b94fa2006-01-16 16:50:04 +0000616
617 spin_lock(&gl->gl_spin);
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400618 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000619 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400620 } else {
Robert Peterson04b933f2007-03-23 17:05:15 -0500621 gl->gl_owner_pid = current->pid;
Steven Whitehouse320dd102006-05-18 16:25:27 -0400622 gl->gl_ip = (unsigned long)__builtin_return_address(0);
Steven Whitehousefee852e2007-01-17 15:33:23 +0000623 clear_bit(HIF_WAIT, &gh.gh_iflags);
624 smp_mb();
625 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
Steven Whitehouse320dd102006-05-18 16:25:27 -0400626 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000627 spin_unlock(&gl->gl_spin);
628
Steven Whitehousefee852e2007-01-17 15:33:23 +0000629 wait_on_holder(&gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000630 gfs2_holder_uninit(&gh);
631}
632
633/**
634 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
635 * @gl: the glock
636 *
637 * Returns: 1 if the glock is acquired
638 */
639
Adrian Bunk08bc2db2006-04-28 10:59:12 -0400640static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000641{
642 int acquired = 1;
643
644 spin_lock(&gl->gl_spin);
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400645 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000646 acquired = 0;
Steven Whitehouse85d1da62006-09-07 14:40:21 -0400647 } else {
Robert Peterson04b933f2007-03-23 17:05:15 -0500648 gl->gl_owner_pid = current->pid;
Steven Whitehouse320dd102006-05-18 16:25:27 -0400649 gl->gl_ip = (unsigned long)__builtin_return_address(0);
650 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000651 spin_unlock(&gl->gl_spin);
652
653 return acquired;
654}
655
656/**
657 * gfs2_glmutex_unlock - release a local lock on a glock
658 * @gl: the glock
659 *
660 */
661
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400662static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000663{
664 spin_lock(&gl->gl_spin);
665 clear_bit(GLF_LOCK, &gl->gl_flags);
Robert Peterson04b933f2007-03-23 17:05:15 -0500666 gl->gl_owner_pid = 0;
Steven Whitehouse320dd102006-05-18 16:25:27 -0400667 gl->gl_ip = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000668 run_queue(gl);
Steven Whitehouse190562b2006-04-20 16:57:23 -0400669 BUG_ON(!spin_is_locked(&gl->gl_spin));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000670 spin_unlock(&gl->gl_spin);
671}
672
673/**
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000674 * handle_callback - process a demote request
David Teiglandb3b94fa2006-01-16 16:50:04 +0000675 * @gl: the glock
676 * @state: the state the caller wants us to change to
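 * @remote: 1 if the request came from another node via the lock module,
 * 0 if it originated locally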
677 *
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000678 * There are only two requests that we are going to see in actual
 679 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
David Teiglandb3b94fa2006-01-16 16:50:04 +0000680 */
681
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100682static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000683{
David Teiglandb3b94fa2006-01-16 16:50:04 +0000684 spin_lock(&gl->gl_spin);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000685 if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
686 gl->gl_demote_state = state;
687 gl->gl_demote_time = jiffies;
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100688 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
689 gl->gl_object) {
690 struct inode *inode = igrab(gl->gl_object);
691 spin_unlock(&gl->gl_spin);
692 if (inode) {
693 d_prune_aliases(inode);
694 iput(inode);
695 }
696 return;
697 }
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000698 } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
699 gl->gl_demote_state = state;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000700 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000701 spin_unlock(&gl->gl_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000702}
703
704/**
705 * state_change - record that the glock is now in a different state
706 * @gl: the glock
 707 * @new_state: the new state
708 *
709 */
710
711static void state_change(struct gfs2_glock *gl, unsigned int new_state)
712{
David Teiglandb3b94fa2006-01-16 16:50:04 +0000713 int held1, held2;
714
715 held1 = (gl->gl_state != LM_ST_UNLOCKED);
716 held2 = (new_state != LM_ST_UNLOCKED);
717
718 if (held1 != held2) {
David Teigland6a6b3d02006-02-23 10:11:47 +0000719 if (held2)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000720 gfs2_glock_hold(gl);
David Teigland6a6b3d02006-02-23 10:11:47 +0000721 else
David Teiglandb3b94fa2006-01-16 16:50:04 +0000722 gfs2_glock_put(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000723 }
724
725 gl->gl_state = new_state;
726}
727
728/**
729 * xmote_bh - Called after the lock module is done acquiring a lock
730 * @gl: The glock in question
731 * @ret: the int returned from the lock module
732 *
733 */
734
735static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
736{
737 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400738 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000739 struct gfs2_holder *gh = gl->gl_req_gh;
740 int prev_state = gl->gl_state;
741 int op_done = 1;
742
743 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
Steven Whitehouse12132932007-01-22 13:09:04 -0500744 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000745 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
746
747 state_change(gl, ret & LM_OUT_ST_MASK);
748
749 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
750 if (glops->go_inval)
Steven Whitehouse1a14d3a2006-11-20 10:37:45 -0500751 glops->go_inval(gl, DIO_METADATA);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000752 } else if (gl->gl_state == LM_ST_DEFERRED) {
753 /* We might not want to do this here.
754 Look at moving to the inode glops. */
755 if (glops->go_inval)
Steven Whitehouse1a14d3a2006-11-20 10:37:45 -0500756 glops->go_inval(gl, 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000757 }
758
759 /* Deal with each possible exit condition */
760
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000761 if (!gh) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000762 gl->gl_stamp = jiffies;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000763 if (ret & LM_OUT_CANCELED)
764 op_done = 0;
765 else
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100766 gfs2_demote_wake(gl);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000767 } else {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000768 spin_lock(&gl->gl_spin);
769 list_del_init(&gh->gh_list);
770 gh->gh_error = -EIO;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000771 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
772 goto out;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000773 gh->gh_error = GLR_CANCELED;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000774 if (ret & LM_OUT_CANCELED)
775 goto out;
776 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
777 list_add_tail(&gh->gh_list, &gl->gl_holders);
778 gh->gh_error = 0;
779 set_bit(HIF_HOLDER, &gh->gh_iflags);
780 set_bit(HIF_FIRST, &gh->gh_iflags);
781 op_done = 0;
782 goto out;
783 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000784 gh->gh_error = GLR_TRYFAILED;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000785 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
786 goto out;
787 gh->gh_error = -EINVAL;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000788 if (gfs2_assert_withdraw(sdp, 0) == -1)
789 fs_err(sdp, "ret = 0x%.8X\n", ret);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000790out:
791 spin_unlock(&gl->gl_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000792 }
793
794 if (glops->go_xmote_bh)
795 glops->go_xmote_bh(gl);
796
797 if (op_done) {
798 spin_lock(&gl->gl_spin);
799 gl->gl_req_gh = NULL;
800 gl->gl_req_bh = NULL;
801 clear_bit(GLF_LOCK, &gl->gl_flags);
802 run_queue(gl);
803 spin_unlock(&gl->gl_spin);
804 }
805
806 gfs2_glock_put(gl);
807
Steven Whitehousefee852e2007-01-17 15:33:23 +0000808 if (gh)
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000809 gfs2_holder_wake(gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000810}
811
812/**
813 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
814 * @gl: The glock in question
 815 * @gh: the holder requesting the state change, or NULL when the change
 816 * is for a queued demote
817 *
818 */
819
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000820static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000821{
822 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000823 int flags = gh ? gh->gh_flags : 0;
824 unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400825 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000826 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
827 LM_FLAG_NOEXP | LM_FLAG_ANY |
828 LM_FLAG_PRIORITY);
829 unsigned int lck_ret;
830
Steven Whitehouseb5d32be2007-01-22 12:15:34 -0500831 if (glops->go_xmote_th)
832 glops->go_xmote_th(gl);
833
David Teiglandb3b94fa2006-01-16 16:50:04 +0000834 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
Steven Whitehouse12132932007-01-22 13:09:04 -0500835 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000836 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
837 gfs2_assert_warn(sdp, state != gl->gl_state);
838
David Teiglandb3b94fa2006-01-16 16:50:04 +0000839 gfs2_glock_hold(gl);
840 gl->gl_req_bh = xmote_bh;
841
Steven Whitehouseec45d9f2006-08-30 10:36:52 -0400842 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000843
844 if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
845 return;
846
847 if (lck_ret & LM_OUT_ASYNC)
848 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
849 else
850 xmote_bh(gl, lck_ret);
851}
852
853/**
854 * drop_bh - Called after a lock module unlock completes
855 * @gl: the glock
856 * @ret: the return status
857 *
858 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
859 * Doesn't drop the reference on the glock the top half took out
860 *
861 */
862
863static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
864{
865 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400866 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000867 struct gfs2_holder *gh = gl->gl_req_gh;
868
David Teiglandb3b94fa2006-01-16 16:50:04 +0000869 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
Steven Whitehouse12132932007-01-22 13:09:04 -0500870 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000871 gfs2_assert_warn(sdp, !ret);
872
873 state_change(gl, LM_ST_UNLOCKED);
Abhijith Dasd93cfa92007-06-11 08:22:32 +0100874 gfs2_demote_wake(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000875
876 if (glops->go_inval)
Steven Whitehouse1a14d3a2006-11-20 10:37:45 -0500877 glops->go_inval(gl, DIO_METADATA);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000878
879 if (gh) {
880 spin_lock(&gl->gl_spin);
881 list_del_init(&gh->gh_list);
882 gh->gh_error = 0;
883 spin_unlock(&gl->gl_spin);
884 }
885
David Teiglandb3b94fa2006-01-16 16:50:04 +0000886 spin_lock(&gl->gl_spin);
887 gl->gl_req_gh = NULL;
888 gl->gl_req_bh = NULL;
889 clear_bit(GLF_LOCK, &gl->gl_flags);
890 run_queue(gl);
891 spin_unlock(&gl->gl_spin);
892
893 gfs2_glock_put(gl);
894
Steven Whitehousefee852e2007-01-17 15:33:23 +0000895 if (gh)
Steven Whitehouse3b8249f2007-03-16 09:40:31 +0000896 gfs2_holder_wake(gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000897}
898
899/**
900 * gfs2_glock_drop_th - call into the lock module to unlock a lock
901 * @gl: the glock
902 *
903 */
904
Steven Whitehouseb5d32be2007-01-22 12:15:34 -0500905static void gfs2_glock_drop_th(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000906{
907 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400908 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000909 unsigned int ret;
910
Steven Whitehouseb5d32be2007-01-22 12:15:34 -0500911 if (glops->go_drop_th)
912 glops->go_drop_th(gl);
913
David Teiglandb3b94fa2006-01-16 16:50:04 +0000914 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
Steven Whitehouse12132932007-01-22 13:09:04 -0500915 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000916 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
917
David Teiglandb3b94fa2006-01-16 16:50:04 +0000918 gfs2_glock_hold(gl);
919 gl->gl_req_bh = drop_bh;
920
David Teiglandb3b94fa2006-01-16 16:50:04 +0000921 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
922
923 if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
924 return;
925
926 if (!ret)
927 drop_bh(gl, ret);
928 else
929 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
930}
931
932/**
933 * do_cancels - cancel requests for locks stuck waiting on an expire flag
934 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
935 *
936 * Don't cancel GL_NOCANCEL requests.
937 */
938
939static void do_cancels(struct gfs2_holder *gh)
940{
941 struct gfs2_glock *gl = gh->gh_gl;
942
943 spin_lock(&gl->gl_spin);
944
945 while (gl->gl_req_gh != gh &&
946 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
947 !list_empty(&gh->gh_list)) {
Steven Whitehouse50299962006-09-04 09:49:55 -0400948 if (gl->gl_req_bh && !(gl->gl_req_gh &&
949 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000950 spin_unlock(&gl->gl_spin);
951 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
952 msleep(100);
953 spin_lock(&gl->gl_spin);
954 } else {
955 spin_unlock(&gl->gl_spin);
956 msleep(100);
957 spin_lock(&gl->gl_spin);
958 }
959 }
960
961 spin_unlock(&gl->gl_spin);
962}
963
964/**
965 * glock_wait_internal - wait on a glock acquisition
966 * @gh: the glock holder
967 *
968 * Returns: 0 on success
969 */
970
971static int glock_wait_internal(struct gfs2_holder *gh)
972{
973 struct gfs2_glock *gl = gh->gh_gl;
974 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -0400975 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000976
977 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
978 return -EIO;
979
980 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
981 spin_lock(&gl->gl_spin);
982 if (gl->gl_req_gh != gh &&
983 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
984 !list_empty(&gh->gh_list)) {
985 list_del_init(&gh->gh_list);
986 gh->gh_error = GLR_TRYFAILED;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000987 run_queue(gl);
988 spin_unlock(&gl->gl_spin);
989 return gh->gh_error;
990 }
991 spin_unlock(&gl->gl_spin);
992 }
993
994 if (gh->gh_flags & LM_FLAG_PRIORITY)
995 do_cancels(gh);
996
Steven Whitehousefee852e2007-01-17 15:33:23 +0000997 wait_on_holder(gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000998 if (gh->gh_error)
999 return gh->gh_error;
1000
1001 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001002 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
David Teiglandb3b94fa2006-01-16 16:50:04 +00001003 gh->gh_flags));
1004
1005 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1006 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1007
1008 if (glops->go_lock) {
1009 gh->gh_error = glops->go_lock(gh);
1010 if (gh->gh_error) {
1011 spin_lock(&gl->gl_spin);
1012 list_del_init(&gh->gh_list);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001013 spin_unlock(&gl->gl_spin);
1014 }
1015 }
1016
1017 spin_lock(&gl->gl_spin);
1018 gl->gl_req_gh = NULL;
1019 gl->gl_req_bh = NULL;
1020 clear_bit(GLF_LOCK, &gl->gl_flags);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001021 run_queue(gl);
1022 spin_unlock(&gl->gl_spin);
1023 }
1024
1025 return gh->gh_error;
1026}
1027
1028static inline struct gfs2_holder *
Robert Peterson04b933f2007-03-23 17:05:15 -05001029find_holder_by_owner(struct list_head *head, pid_t pid)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001030{
1031 struct gfs2_holder *gh;
1032
1033 list_for_each_entry(gh, head, gh_list) {
Robert Peterson04b933f2007-03-23 17:05:15 -05001034 if (gh->gh_owner_pid == pid)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001035 return gh;
1036 }
1037
1038 return NULL;
1039}
1040
Robert Peterson7c52b162007-03-16 10:26:37 +00001041static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1042{
1043 va_list args;
1044
1045 va_start(args, fmt);
1046 if (gi) {
 1047 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
 1048 seq_printf(gi->seq, "%s", gi->string);
1049 }
1050 else
1051 vprintk(fmt, args);
1052 va_end(args);
1053}
1054
David Teiglandb3b94fa2006-01-16 16:50:04 +00001055/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001056 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1057 * @gh: the holder structure to add
1058 *
1059 */
1060
1061static void add_to_queue(struct gfs2_holder *gh)
1062{
1063 struct gfs2_glock *gl = gh->gh_gl;
1064 struct gfs2_holder *existing;
1065
Robert Peterson04b933f2007-03-23 17:05:15 -05001066 BUG_ON(!gh->gh_owner_pid);
Steven Whitehousefee852e2007-01-17 15:33:23 +00001067 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1068 BUG();
Steven Whitehouse190562b2006-04-20 16:57:23 -04001069
Robert Peterson04b933f2007-03-23 17:05:15 -05001070 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001071 if (existing) {
Steven Whitehouse5965b1f2006-04-26 13:21:55 -04001072 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
Robert Peterson04b933f2007-03-23 17:05:15 -05001073 printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001074 printk(KERN_INFO "lock type : %d lock state : %d\n",
Abhijith Das86384602006-08-25 11:13:37 -05001075 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
Steven Whitehouse5965b1f2006-04-26 13:21:55 -04001076 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
Robert Peterson04b933f2007-03-23 17:05:15 -05001077 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001078 printk(KERN_INFO "lock type : %d lock state : %d\n",
Abhijith Das86384602006-08-25 11:13:37 -05001079 gl->gl_name.ln_type, gl->gl_state);
Steven Whitehouse5965b1f2006-04-26 13:21:55 -04001080 BUG();
David Teiglandb3b94fa2006-01-16 16:50:04 +00001081 }
1082
Robert Peterson04b933f2007-03-23 17:05:15 -05001083 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001084 if (existing) {
Steven Whitehouse5965b1f2006-04-26 13:21:55 -04001085 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1086 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1087 BUG();
David Teiglandb3b94fa2006-01-16 16:50:04 +00001088 }
1089
David Teiglandb3b94fa2006-01-16 16:50:04 +00001090 if (gh->gh_flags & LM_FLAG_PRIORITY)
1091 list_add(&gh->gh_list, &gl->gl_waiters3);
1092 else
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001093 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001094}
1095
1096/**
1097 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1098 * @gh: the holder structure
1099 *
1100 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1101 *
1102 * Returns: 0, GLR_TRYFAILED, or errno on failure
1103 */
1104
1105int gfs2_glock_nq(struct gfs2_holder *gh)
1106{
1107 struct gfs2_glock *gl = gh->gh_gl;
1108 struct gfs2_sbd *sdp = gl->gl_sbd;
1109 int error = 0;
1110
Steven Whitehouse320dd102006-05-18 16:25:27 -04001111restart:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001112 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1113 set_bit(HIF_ABORTED, &gh->gh_iflags);
1114 return -EIO;
1115 }
1116
1117 set_bit(HIF_PROMOTE, &gh->gh_iflags);
1118
1119 spin_lock(&gl->gl_spin);
1120 add_to_queue(gh);
1121 run_queue(gl);
1122 spin_unlock(&gl->gl_spin);
1123
1124 if (!(gh->gh_flags & GL_ASYNC)) {
1125 error = glock_wait_internal(gh);
1126 if (error == GLR_CANCELED) {
Steven Whitehouse190562b2006-04-20 16:57:23 -04001127 msleep(100);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001128 goto restart;
1129 }
1130 }
1131
David Teiglandb3b94fa2006-01-16 16:50:04 +00001132 return error;
1133}
1134
1135/**
1136 * gfs2_glock_poll - poll to see if an async request has been completed
1137 * @gh: the holder
1138 *
1139 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1140 */
1141
1142int gfs2_glock_poll(struct gfs2_holder *gh)
1143{
1144 struct gfs2_glock *gl = gh->gh_gl;
1145 int ready = 0;
1146
1147 spin_lock(&gl->gl_spin);
1148
1149 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1150 ready = 1;
1151 else if (list_empty(&gh->gh_list)) {
1152 if (gh->gh_error == GLR_CANCELED) {
1153 spin_unlock(&gl->gl_spin);
Steven Whitehouse190562b2006-04-20 16:57:23 -04001154 msleep(100);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001155 if (gfs2_glock_nq(gh))
1156 return 1;
1157 return 0;
1158 } else
1159 ready = 1;
1160 }
1161
1162 spin_unlock(&gl->gl_spin);
1163
1164 return ready;
1165}
1166
1167/**
 1168 * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
1169 * @gh: the holder structure
1170 *
1171 * Returns: 0, GLR_TRYFAILED, or errno on failure
1172 */
1173
1174int gfs2_glock_wait(struct gfs2_holder *gh)
1175{
1176 int error;
1177
1178 error = glock_wait_internal(gh);
1179 if (error == GLR_CANCELED) {
Steven Whitehouse190562b2006-04-20 16:57:23 -04001180 msleep(100);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001181 gh->gh_flags &= ~GL_ASYNC;
1182 error = gfs2_glock_nq(gh);
1183 }
1184
1185 return error;
1186}
1187
1188/**
1189 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1190 * @gh: the glock holder
1191 *
1192 */
1193
1194void gfs2_glock_dq(struct gfs2_holder *gh)
1195{
1196 struct gfs2_glock *gl = gh->gh_gl;
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001197 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001198
David Teiglandb3b94fa2006-01-16 16:50:04 +00001199 if (gh->gh_flags & GL_NOCACHE)
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001200 handle_callback(gl, LM_ST_UNLOCKED, 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001201
1202 gfs2_glmutex_lock(gl);
1203
1204 spin_lock(&gl->gl_spin);
1205 list_del_init(&gh->gh_list);
1206
1207 if (list_empty(&gl->gl_holders)) {
1208 spin_unlock(&gl->gl_spin);
1209
1210 if (glops->go_unlock)
1211 glops->go_unlock(gh);
1212
David Teiglandb3b94fa2006-01-16 16:50:04 +00001213 spin_lock(&gl->gl_spin);
Steven Whitehouse3b8249f2007-03-16 09:40:31 +00001214 gl->gl_stamp = jiffies;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001215 }
1216
1217 clear_bit(GLF_LOCK, &gl->gl_flags);
1218 run_queue(gl);
1219 spin_unlock(&gl->gl_spin);
1220}
1221
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001222void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1223{
1224 struct gfs2_glock *gl = gh->gh_gl;
1225 gfs2_glock_dq(gh);
1226 wait_on_demote(gl);
1227}
1228
David Teiglandb3b94fa2006-01-16 16:50:04 +00001229/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001230 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1231 * @gh: the holder structure
1232 *
1233 */
1234
1235void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1236{
1237 gfs2_glock_dq(gh);
1238 gfs2_holder_uninit(gh);
1239}
1240
1241/**
1242 * gfs2_glock_nq_num - acquire a glock based on lock number
1243 * @sdp: the filesystem
1244 * @number: the lock number
1245 * @glops: the glock operations for the type of glock
1246 * @state: the state to acquire the glock in
 1247 * @flags: modifier flags for the acquisition
1248 * @gh: the struct gfs2_holder
1249 *
1250 * Returns: errno
1251 */
1252
Steven Whitehousecd915492006-09-04 12:49:07 -04001253int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001254 const struct gfs2_glock_operations *glops,
1255 unsigned int state, int flags, struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001256{
1257 struct gfs2_glock *gl;
1258 int error;
1259
1260 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1261 if (!error) {
1262 error = gfs2_glock_nq_init(gl, state, flags, gh);
1263 gfs2_glock_put(gl);
1264 }
1265
1266 return error;
1267}
1268
1269/**
1270 * glock_compare - Compare two struct gfs2_glock structures for sorting
1271 * @arg_a: the first structure
1272 * @arg_b: the second structure
1273 *
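 * Used by nq_m_sync() to sort holders by lock number so that multiple
 * glocks are always acquired in the same order, avoiding ABBA deadlocks.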
1274 */
1275
1276static int glock_compare(const void *arg_a, const void *arg_b)
1277{
Steven Whitehousea5e08a92006-09-09 17:07:05 -04001278 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1279 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1280 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1281 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001282
1283 if (a->ln_number > b->ln_number)
Steven Whitehousea5e08a92006-09-09 17:07:05 -04001284 return 1;
1285 if (a->ln_number < b->ln_number)
1286 return -1;
Steven Whitehouse1c0f4872007-01-22 12:10:39 -05001287 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
Steven Whitehousea5e08a92006-09-09 17:07:05 -04001288 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001289}
1290
1291/**
 1292 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1293 * @num_gh: the number of structures
1294 * @ghs: an array of struct gfs2_holder structures
1295 *
1296 * Returns: 0 on success (all glocks acquired),
1297 * errno on failure (no glocks acquired)
1298 */
1299
1300static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1301 struct gfs2_holder **p)
1302{
1303 unsigned int x;
1304 int error = 0;
1305
1306 for (x = 0; x < num_gh; x++)
1307 p[x] = &ghs[x];
1308
1309 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1310
1311 for (x = 0; x < num_gh; x++) {
1312 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1313
1314 error = gfs2_glock_nq(p[x]);
1315 if (error) {
1316 while (x--)
1317 gfs2_glock_dq(p[x]);
1318 break;
1319 }
1320 }
1321
1322 return error;
1323}
1324
1325/**
1326 * gfs2_glock_nq_m - acquire multiple glocks
1327 * @num_gh: the number of structures
1328 * @ghs: an array of struct gfs2_holder structures
1329 *
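 * Up to four holders are sorted in an on-stack array; larger requests fall
 * back to a temporary array allocated with kmalloc().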
David Teiglandb3b94fa2006-01-16 16:50:04 +00001330 *
1331 * Returns: 0 on success (all glocks acquired),
1332 * errno on failure (no glocks acquired)
1333 */
1334
1335int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1336{
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001337 struct gfs2_holder *tmp[4];
1338 struct gfs2_holder **pph = tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001339 int error = 0;
1340
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001341 switch(num_gh) {
1342 case 0:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001343 return 0;
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001344 case 1:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001345 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1346 return gfs2_glock_nq(ghs);
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001347 default:
1348 if (num_gh <= 4)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001349 break;
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001350 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1351 if (!pph)
1352 return -ENOMEM;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001353 }
1354
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001355 error = nq_m_sync(num_gh, ghs, pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001356
Steven Whitehouseeaf5bd32007-06-19 15:38:17 +01001357 if (pph != tmp)
1358 kfree(pph);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001359
1360 return error;
1361}
1362
1363/**
1364 * gfs2_glock_dq_m - release multiple glocks
1365 * @num_gh: the number of structures
1366 * @ghs: an array of struct gfs2_holder structures
1367 *
1368 */
1369
1370void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1371{
1372 unsigned int x;
1373
1374 for (x = 0; x < num_gh; x++)
1375 gfs2_glock_dq(&ghs[x]);
1376}
1377
1378/**
 1379 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
1380 * @num_gh: the number of structures
1381 * @ghs: an array of struct gfs2_holder structures
1382 *
1383 */
1384
1385void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1386{
1387 unsigned int x;
1388
1389 for (x = 0; x < num_gh; x++)
1390 gfs2_glock_dq_uninit(&ghs[x]);
1391}
1392
1393/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001394 * gfs2_lvb_hold - attach an LVB to a glock
1395 * @gl: The glock in question
1396 *
1397 */
1398
1399int gfs2_lvb_hold(struct gfs2_glock *gl)
1400{
1401 int error;
1402
1403 gfs2_glmutex_lock(gl);
1404
1405 if (!atomic_read(&gl->gl_lvb_count)) {
1406 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1407 if (error) {
1408 gfs2_glmutex_unlock(gl);
1409 return error;
1410 }
1411 gfs2_glock_hold(gl);
1412 }
1413 atomic_inc(&gl->gl_lvb_count);
1414
1415 gfs2_glmutex_unlock(gl);
1416
1417 return 0;
1418}
1419
1420/**
 1421 * gfs2_lvb_unhold - detach an LVB from a glock
1422 * @gl: The glock in question
1423 *
1424 */
1425
1426void gfs2_lvb_unhold(struct gfs2_glock *gl)
1427{
1428 gfs2_glock_hold(gl);
1429 gfs2_glmutex_lock(gl);
1430
1431 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1432 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1433 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1434 gl->gl_lvb = NULL;
1435 gfs2_glock_put(gl);
1436 }
1437
1438 gfs2_glmutex_unlock(gl);
1439 gfs2_glock_put(gl);
1440}
1441
David Teiglandb3b94fa2006-01-16 16:50:04 +00001442static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1443 unsigned int state)
1444{
1445 struct gfs2_glock *gl;
1446
1447 gl = gfs2_glock_find(sdp, name);
1448 if (!gl)
1449 return;
1450
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001451 handle_callback(gl, state, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001452
1453 spin_lock(&gl->gl_spin);
1454 run_queue(gl);
1455 spin_unlock(&gl->gl_spin);
1456
1457 gfs2_glock_put(gl);
1458}
1459
1460/**
1461 * gfs2_glock_cb - Callback used by locking module
Steven Whitehouse1c089c32006-09-07 15:50:20 -04001462 * @cb_data: Pointer to the superblock
David Teiglandb3b94fa2006-01-16 16:50:04 +00001463 * @type: Type of callback
1464 * @data: Type dependent data pointer
1465 *
1466 * Called by the locking module when it wants to tell us something.
1467 * Either we need to drop a lock, one of our ASYNC requests completed, or
1468 * a journal from another client needs to be recovered.
1469 */
1470
Steven Whitehouse9b47c112006-09-08 10:17:58 -04001471void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001472{
Steven Whitehouse9b47c112006-09-08 10:17:58 -04001473 struct gfs2_sbd *sdp = cb_data;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001474
David Teiglandb3b94fa2006-01-16 16:50:04 +00001475 switch (type) {
1476 case LM_CB_NEED_E:
David Teiglande7f5c012006-04-27 11:25:45 -04001477 blocking_cb(sdp, data, LM_ST_UNLOCKED);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001478 return;
1479
1480 case LM_CB_NEED_D:
David Teiglande7f5c012006-04-27 11:25:45 -04001481 blocking_cb(sdp, data, LM_ST_DEFERRED);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001482 return;
1483
1484 case LM_CB_NEED_S:
David Teiglande7f5c012006-04-27 11:25:45 -04001485 blocking_cb(sdp, data, LM_ST_SHARED);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001486 return;
1487
1488 case LM_CB_ASYNC: {
David Teiglande7f5c012006-04-27 11:25:45 -04001489 struct lm_async_cb *async = data;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001490 struct gfs2_glock *gl;
1491
Steven Whitehouse61be0842007-01-29 11:51:45 +00001492 down_read(&gfs2_umount_flush_sem);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001493 gl = gfs2_glock_find(sdp, &async->lc_name);
 1494 		if (gfs2_assert_warn(sdp, gl)) {
 1495 			/* drop the umount flush semaphore before bailing out */
 			up_read(&gfs2_umount_flush_sem);
 			return;
 		}
1496 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1497 gl->gl_req_bh(gl, async->lc_ret);
1498 gfs2_glock_put(gl);
Steven Whitehouse61be0842007-01-29 11:51:45 +00001499 up_read(&gfs2_umount_flush_sem);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001500 return;
1501 }
1502
1503 case LM_CB_NEED_RECOVERY:
1504 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1505 if (sdp->sd_recoverd_process)
1506 wake_up_process(sdp->sd_recoverd_process);
1507 return;
1508
1509 case LM_CB_DROPLOCKS:
1510 gfs2_gl_hash_clear(sdp, NO_WAIT);
1511 gfs2_quota_scan(sdp);
1512 return;
1513
1514 default:
1515 gfs2_assert_warn(sdp, 0);
1516 return;
1517 }
1518}
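
/*
 * Illustrative sketch: a lock module delivers these callbacks by calling the
 * function pointer it was handed at mount time (gfs2_glock_cb, with the
 * superblock as cb_data).  The lockspace structure and field names below are
 * hypothetical stand-ins for whatever the module stored when it mounted.
 */
struct example_lockspace {
	lm_callback_t cb;	/* gfs2_glock_cb */
	void *cb_data;		/* the struct gfs2_sbd */
};

static void example_request_shared_demote(struct example_lockspace *ls,
					  u64 blkno)
{
	struct lm_lockname name = {
		.ln_number = blkno,
		.ln_type = LM_TYPE_INODE,
	};

	/* "please drop this inode glock to shared" -> blocking_cb() above */
	ls->cb(ls->cb_data, LM_CB_NEED_S, &name);
}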
1519
1520/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00001521 * demote_ok - Check to see if it's ok to unlock a glock
1522 * @gl: the glock
1523 *
1524 * Returns: 1 if it's ok
1525 */
1526
1527static int demote_ok(struct gfs2_glock *gl)
1528{
Steven Whitehouse8fb4b532006-08-30 09:30:00 -04001529 const struct gfs2_glock_operations *glops = gl->gl_ops;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001530 int demote = 1;
1531
1532 if (test_bit(GLF_STICKY, &gl->gl_flags))
1533 demote = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001534 else if (glops->go_demote_ok)
1535 demote = glops->go_demote_ok(gl);
1536
1537 return demote;
1538}
1539
1540/**
1541 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1542 * @gl: the glock
1543 *
1544 */
1545
1546void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1547{
1548 struct gfs2_sbd *sdp = gl->gl_sbd;
1549
1550 spin_lock(&sdp->sd_reclaim_lock);
1551 if (list_empty(&gl->gl_reclaim)) {
1552 gfs2_glock_hold(gl);
1553 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1554 atomic_inc(&sdp->sd_reclaim_count);
1555 }
1556 spin_unlock(&sdp->sd_reclaim_lock);
1557
1558 wake_up(&sdp->sd_reclaim_wq);
1559}
1560
1561/**
1562 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1563 * @sdp: the filesystem
1564 *
 1565 * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a
 1566 * different glock and noticing that there are a lot of glocks on the
 1567 * reclaim list.
1568 *
1569 */
1570
1571void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1572{
1573 struct gfs2_glock *gl;
1574
1575 spin_lock(&sdp->sd_reclaim_lock);
1576 if (list_empty(&sdp->sd_reclaim_list)) {
1577 spin_unlock(&sdp->sd_reclaim_lock);
1578 return;
1579 }
1580 gl = list_entry(sdp->sd_reclaim_list.next,
1581 struct gfs2_glock, gl_reclaim);
1582 list_del_init(&gl->gl_reclaim);
1583 spin_unlock(&sdp->sd_reclaim_lock);
1584
1585 atomic_dec(&sdp->sd_reclaim_count);
1586 atomic_inc(&sdp->sd_reclaimed);
1587
1588 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse12132932007-01-22 13:09:04 -05001589 if (list_empty(&gl->gl_holders) &&
Steven Whitehouse50299962006-09-04 09:49:55 -04001590 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001591 handle_callback(gl, LM_ST_UNLOCKED, 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001592 gfs2_glmutex_unlock(gl);
1593 }
1594
1595 gfs2_glock_put(gl);
1596}
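
/*
 * Illustrative sketch: the reclaim list filled by
 * gfs2_glock_schedule_for_reclaim() is drained by repeatedly calling
 * gfs2_reclaim_glock().  The real consumer is the gfs2_glockd daemon; the
 * loop and the reclaim_limit threshold below are simplified stand-ins.
 */
static void example_drain_reclaim_list(struct gfs2_sbd *sdp,
				       unsigned int reclaim_limit)
{
	while (atomic_read(&sdp->sd_reclaim_count) > reclaim_limit)
		gfs2_reclaim_glock(sdp);
}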
1597
1598/**
 1599 * examine_bucket - Call a function for each glock in a hash bucket
1600 * @examiner: the function
1601 * @sdp: the filesystem
1602 * @bucket: the bucket
1603 *
1604 * Returns: 1 if the bucket has entries
1605 */
1606
1607static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
Steven Whitehouse37b2fa62006-09-08 13:35:56 -04001608 unsigned int hash)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001609{
Steven Whitehouse24264432006-09-11 21:40:30 -04001610 struct gfs2_glock *gl, *prev = NULL;
1611 int has_entries = 0;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001612 struct hlist_head *head = &gl_hash_table[hash].hb_list;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001613
Steven Whitehouse24264432006-09-11 21:40:30 -04001614 read_lock(gl_lock_addr(hash));
Steven Whitehouseb6397892006-09-12 10:10:01 -04001615 /* Can't use hlist_for_each_entry - don't want prefetch here */
1616 if (hlist_empty(head))
Steven Whitehouse24264432006-09-11 21:40:30 -04001617 goto out;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001618 gl = list_entry(head->first, struct gfs2_glock, gl_list);
1619 while(1) {
Steven Whitehouse24264432006-09-11 21:40:30 -04001620 if (gl->gl_sbd == sdp) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001621 gfs2_glock_hold(gl);
Steven Whitehouse24264432006-09-11 21:40:30 -04001622 read_unlock(gl_lock_addr(hash));
1623 if (prev)
1624 gfs2_glock_put(prev);
1625 prev = gl;
1626 examiner(gl);
Steven Whitehousea8336342006-09-14 13:57:38 -04001627 has_entries = 1;
Steven Whitehouse24264432006-09-11 21:40:30 -04001628 read_lock(gl_lock_addr(hash));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001629 }
Steven Whitehouseb6397892006-09-12 10:10:01 -04001630 if (gl->gl_list.next == NULL)
1631 break;
Steven Whitehouse24264432006-09-11 21:40:30 -04001632 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001633 }
Steven Whitehouse24264432006-09-11 21:40:30 -04001634out:
1635 read_unlock(gl_lock_addr(hash));
1636 if (prev)
1637 gfs2_glock_put(prev);
1638 return has_entries;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001639}
1640
1641/**
1642 * scan_glock - look at a glock and see if we can reclaim it
1643 * @gl: the glock to look at
1644 *
1645 */
1646
1647static void scan_glock(struct gfs2_glock *gl)
1648{
Steven Whitehouseb0041572006-11-23 10:51:34 -05001649 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
Steven Whitehouse24264432006-09-11 21:40:30 -04001650 return;
Steven Whitehousea2242db2006-08-24 17:03:05 -04001651
David Teiglandb3b94fa2006-01-16 16:50:04 +00001652 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse12132932007-01-22 13:09:04 -05001653 if (list_empty(&gl->gl_holders) &&
Steven Whitehouse24264432006-09-11 21:40:30 -04001654 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
David Teiglandb3b94fa2006-01-16 16:50:04 +00001655 goto out_schedule;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001656 gfs2_glmutex_unlock(gl);
1657 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001658 return;
1659
Steven Whitehouse627add22006-07-05 13:16:19 -04001660out_schedule:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001661 gfs2_glmutex_unlock(gl);
1662 gfs2_glock_schedule_for_reclaim(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001663}
1664
1665/**
1666 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1667 * @sdp: the filesystem
1668 *
1669 */
1670
1671void gfs2_scand_internal(struct gfs2_sbd *sdp)
1672{
1673 unsigned int x;
1674
Steven Whitehouse94610612006-09-09 18:59:27 -04001675 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
Steven Whitehouse37b2fa62006-09-08 13:35:56 -04001676 examine_bucket(scan_glock, sdp, x);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001677}
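
/*
 * Illustrative sketch: gfs2_scand_internal() is meant to be called
 * periodically.  The scand daemon in daemon.c does roughly the loop below,
 * with the interval taken from the gt_scand_secs tunable; kthread_should_stop()
 * assumes <linux/kthread.h> is available.
 */
static int example_scand(void *data)
{
	struct gfs2_sbd *sdp = data;

	while (!kthread_should_stop()) {
		gfs2_scand_internal(sdp);
		schedule_timeout_interruptible(gfs2_tune_get(sdp, gt_scand_secs) * HZ);
	}

	return 0;
}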
1678
1679/**
 1680 * clear_glock - look at a glock and see if we can free it from the glock cache
1681 * @gl: the glock to look at
1682 *
1683 */
1684
1685static void clear_glock(struct gfs2_glock *gl)
1686{
1687 struct gfs2_sbd *sdp = gl->gl_sbd;
1688 int released;
1689
1690 spin_lock(&sdp->sd_reclaim_lock);
1691 if (!list_empty(&gl->gl_reclaim)) {
1692 list_del_init(&gl->gl_reclaim);
1693 atomic_dec(&sdp->sd_reclaim_count);
Steven Whitehouse190562b2006-04-20 16:57:23 -04001694 spin_unlock(&sdp->sd_reclaim_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001695 released = gfs2_glock_put(gl);
1696 gfs2_assert(sdp, !released);
Steven Whitehouse190562b2006-04-20 16:57:23 -04001697 } else {
1698 spin_unlock(&sdp->sd_reclaim_lock);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001699 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001700
1701 if (gfs2_glmutex_trylock(gl)) {
Steven Whitehouse90101c32007-01-23 13:20:41 -05001702 if (list_empty(&gl->gl_holders) &&
David Teiglandb3b94fa2006-01-16 16:50:04 +00001703 gl->gl_state != LM_ST_UNLOCKED)
Abhijith Dasd93cfa92007-06-11 08:22:32 +01001704 handle_callback(gl, LM_ST_UNLOCKED, 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001705 gfs2_glmutex_unlock(gl);
1706 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001707}
1708
1709/**
1710 * gfs2_gl_hash_clear - Empty out the glock hash table
1711 * @sdp: the filesystem
1712 * @wait: wait until it's all gone
1713 *
 1714 * Called when unmounting the filesystem, or when the inter-node lock manager
1715 * requests DROPLOCKS because it is running out of capacity.
1716 */
1717
1718void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1719{
1720 unsigned long t;
1721 unsigned int x;
1722 int cont;
1723
1724 t = jiffies;
1725
1726 for (;;) {
1727 cont = 0;
Steven Whitehouse24264432006-09-11 21:40:30 -04001728 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
Steven Whitehouse907b9bc2006-09-25 09:26:04 -04001729 if (examine_bucket(clear_glock, sdp, x))
David Teiglandb3b94fa2006-01-16 16:50:04 +00001730 cont = 1;
Steven Whitehouse24264432006-09-11 21:40:30 -04001731 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001732
1733 if (!wait || !cont)
1734 break;
1735
1736 if (time_after_eq(jiffies,
1737 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1738 fs_warn(sdp, "Unmount seems to be stalled. "
1739 "Dumping lock state...\n");
1740 gfs2_dump_lockstate(sdp);
1741 t = jiffies;
1742 }
1743
Steven Whitehouse61be0842007-01-29 11:51:45 +00001744 down_write(&gfs2_umount_flush_sem);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001745 invalidate_inodes(sdp->sd_vfs);
Steven Whitehouse61be0842007-01-29 11:51:45 +00001746 up_write(&gfs2_umount_flush_sem);
Steven Whitehousefd88de562006-05-05 16:59:11 -04001747 msleep(10);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001748 }
1749}
1750
1751/*
1752 * Diagnostic routines to help debug distributed deadlock
1753 */
1754
Robert Peterson04b933f2007-03-23 17:05:15 -05001755static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1756 unsigned long address)
1757{
Robert Peterson7a0079d2007-04-17 11:37:11 -05001758 char buffer[KSYM_SYMBOL_LEN];
Robert Peterson04b933f2007-03-23 17:05:15 -05001759
Robert Peterson7a0079d2007-04-17 11:37:11 -05001760 sprint_symbol(buffer, address);
1761 print_dbg(gi, fmt, buffer);
Robert Peterson04b933f2007-03-23 17:05:15 -05001762}
1763
David Teiglandb3b94fa2006-01-16 16:50:04 +00001764/**
1765 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL means print to the console)
 1766 * @str: a string naming the type of holder
 1767 * @gh: the glock holder
 1768 *
 1769 * Returns: 0 on success
1770 */
1771
Robert Peterson7c52b162007-03-16 10:26:37 +00001772static int dump_holder(struct glock_iter *gi, char *str,
1773 struct gfs2_holder *gh)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001774{
1775 unsigned int x;
Robert Peterson04b933f2007-03-23 17:05:15 -05001776 struct task_struct *gh_owner;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001777
Robert Peterson7c52b162007-03-16 10:26:37 +00001778 print_dbg(gi, " %s\n", str);
Robert Peterson04b933f2007-03-23 17:05:15 -05001779 if (gh->gh_owner_pid) {
1780 print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
1781 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1782 if (gh_owner)
1783 print_dbg(gi, "(%s)\n", gh_owner->comm);
1784 else
1785 print_dbg(gi, "(ended)\n");
1786 } else
1787 print_dbg(gi, " owner = -1\n");
Robert Peterson7c52b162007-03-16 10:26:37 +00001788 print_dbg(gi, " gh_state = %u\n", gh->gh_state);
1789 print_dbg(gi, " gh_flags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001790 for (x = 0; x < 32; x++)
1791 if (gh->gh_flags & (1 << x))
Robert Peterson7c52b162007-03-16 10:26:37 +00001792 print_dbg(gi, " %u", x);
1793 print_dbg(gi, " \n");
1794 print_dbg(gi, " error = %d\n", gh->gh_error);
1795 print_dbg(gi, " gh_iflags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001796 for (x = 0; x < 32; x++)
1797 if (test_bit(x, &gh->gh_iflags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001798 print_dbg(gi, " %u", x);
1799 print_dbg(gi, " \n");
Robert Peterson04b933f2007-03-23 17:05:15 -05001800 gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001801
Robert Peterson7c52b162007-03-16 10:26:37 +00001802 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001803}
1804
1805/**
1806 * dump_inode - print information about an inode
 * @gi: the glock iterator (NULL means print to the console)
 1807 * @ip: the inode
 1808 *
 1809 * Returns: 0 on success
1810 */
1811
Robert Peterson7c52b162007-03-16 10:26:37 +00001812static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001813{
1814 unsigned int x;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001815
Robert Peterson7c52b162007-03-16 10:26:37 +00001816 print_dbg(gi, " Inode:\n");
1817 print_dbg(gi, " num = %llu/%llu\n",
Steven Whitehousedbb7cae2007-05-15 15:37:50 +01001818 (unsigned long long)ip->i_no_formal_ino,
1819 (unsigned long long)ip->i_no_addr);
Robert Peterson7c52b162007-03-16 10:26:37 +00001820 print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
1821 print_dbg(gi, " i_flags =");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001822 for (x = 0; x < 32; x++)
1823 if (test_bit(x, &ip->i_flags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001824 print_dbg(gi, " %u", x);
1825 print_dbg(gi, " \n");
1826 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001827}
1828
1829/**
1830 * dump_glock - print information about a glock
 * @gi: the glock iterator (NULL means print to the console)
 1831 * @gl: the glock
 1832 *
 1833 * Returns: 0 on success
1835 */
1836
Robert Peterson7c52b162007-03-16 10:26:37 +00001837static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001838{
1839 struct gfs2_holder *gh;
1840 unsigned int x;
1841 int error = -ENOBUFS;
Robert Peterson04b933f2007-03-23 17:05:15 -05001842 struct task_struct *gl_owner;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001843
1844 spin_lock(&gl->gl_spin);
1845
Robert Peterson7c52b162007-03-16 10:26:37 +00001846 print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1847 (unsigned long long)gl->gl_name.ln_number);
1848 print_dbg(gi, " gl_flags =");
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001849 for (x = 0; x < 32; x++) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001850 if (test_bit(x, &gl->gl_flags))
Robert Peterson7c52b162007-03-16 10:26:37 +00001851 print_dbg(gi, " %u", x);
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001852 }
Robert Peterson04b933f2007-03-23 17:05:15 -05001853 if (!test_bit(GLF_LOCK, &gl->gl_flags))
1854 print_dbg(gi, " (unlocked)");
Robert Peterson7c52b162007-03-16 10:26:37 +00001855 print_dbg(gi, " \n");
1856 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1857 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
Robert Peterson04b933f2007-03-23 17:05:15 -05001858 if (gl->gl_owner_pid) {
1859 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1860 if (gl_owner)
1861 print_dbg(gi, " gl_owner = pid %d (%s)\n",
1862 gl->gl_owner_pid, gl_owner->comm);
1863 else
1864 print_dbg(gi, " gl_owner = %d (ended)\n",
1865 gl->gl_owner_pid);
1866 } else
1867 print_dbg(gi, " gl_owner = -1\n");
Robert Peterson7c52b162007-03-16 10:26:37 +00001868 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1869 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1870 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1871 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1872 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1873 print_dbg(gi, " le = %s\n",
David Teiglandb3b94fa2006-01-16 16:50:04 +00001874 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
Robert Peterson7c52b162007-03-16 10:26:37 +00001875 print_dbg(gi, " reclaim = %s\n",
1876 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001877 if (gl->gl_aspace)
Robert Peterson7c52b162007-03-16 10:26:37 +00001878 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1879 gl->gl_aspace->i_mapping->nrpages);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001880 else
Robert Peterson7c52b162007-03-16 10:26:37 +00001881 print_dbg(gi, " aspace = no\n");
1882 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001883 if (gl->gl_req_gh) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001884 error = dump_holder(gi, "Request", gl->gl_req_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001885 if (error)
1886 goto out;
1887 }
1888 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001889 error = dump_holder(gi, "Holder", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001890 if (error)
1891 goto out;
1892 }
1893 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001894 error = dump_holder(gi, "Waiter1", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001895 if (error)
1896 goto out;
1897 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001898 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
Robert Peterson7c52b162007-03-16 10:26:37 +00001899 error = dump_holder(gi, "Waiter3", gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001900 if (error)
1901 goto out;
1902 }
Steven Whitehouse3b8249f2007-03-16 09:40:31 +00001903 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1904 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
Robert Petersoncd81a4b2007-05-14 12:42:18 -05001905 gl->gl_demote_state, (unsigned long long)
1906 (jiffies - gl->gl_demote_time)*(1000000/HZ));
Steven Whitehouse3b8249f2007-03-16 09:40:31 +00001907 }
Steven Whitehouse5c676f62006-02-27 17:23:27 -05001908 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001909 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
Robert Peterson7c52b162007-03-16 10:26:37 +00001910 list_empty(&gl->gl_holders)) {
1911 error = dump_inode(gi, gl->gl_object);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001912 if (error)
1913 goto out;
1914 } else {
1915 error = -ENOBUFS;
Robert Peterson7c52b162007-03-16 10:26:37 +00001916 print_dbg(gi, " Inode: busy\n");
David Teiglandb3b94fa2006-01-16 16:50:04 +00001917 }
1918 }
1919
1920 error = 0;
1921
Steven Whitehousea91ea692006-09-04 12:04:26 -04001922out:
David Teiglandb3b94fa2006-01-16 16:50:04 +00001923 spin_unlock(&gl->gl_spin);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001924 return error;
1925}
1926
1927/**
1928 * gfs2_dump_lockstate - print out the current lockstate
1929 * @sdp: the filesystem
 1930 *
 1931 * Dumps the state of every glock belonging to @sdp to the console.
1933 *
1934 */
1935
Adrian Bunk08bc2db2006-04-28 10:59:12 -04001936static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001937{
David Teiglandb3b94fa2006-01-16 16:50:04 +00001938 struct gfs2_glock *gl;
Steven Whitehouseb6397892006-09-12 10:10:01 -04001939 struct hlist_node *h;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001940 unsigned int x;
1941 int error = 0;
1942
1943 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00001944
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001945 read_lock(gl_lock_addr(x));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001946
Steven Whitehouseb6397892006-09-12 10:10:01 -04001947 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001948 if (gl->gl_sbd != sdp)
1949 continue;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001950
Robert Peterson7c52b162007-03-16 10:26:37 +00001951 error = dump_glock(NULL, gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001952 if (error)
1953 break;
1954 }
1955
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001956 read_unlock(gl_lock_addr(x));
David Teiglandb3b94fa2006-01-16 16:50:04 +00001957
1958 if (error)
1959 break;
1960 }
1961
1962
1963 return error;
1964}
1965
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001966int __init gfs2_glock_init(void)
1967{
1968 unsigned i;
1969 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
Steven Whitehouseb6397892006-09-12 10:10:01 -04001970 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001971 }
Steven Whitehouse087efdd2006-09-09 16:59:11 -04001972#ifdef GL_HASH_LOCK_SZ
1973 for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1974 rwlock_init(&gl_hash_locks[i]);
1975 }
1976#endif
Steven Whitehouse85d1da62006-09-07 14:40:21 -04001977 return 0;
1978}
1979
Robert Peterson7c52b162007-03-16 10:26:37 +00001980static int gfs2_glock_iter_next(struct glock_iter *gi)
1981{
Robert Peterson7a0079d2007-04-17 11:37:11 -05001982 read_lock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00001983 while (1) {
1984 if (!gi->hb_list) { /* If we don't have a hash bucket yet */
1985 gi->hb_list = &gl_hash_table[gi->hash].hb_list;
1986 if (hlist_empty(gi->hb_list)) {
Robert Peterson7a0079d2007-04-17 11:37:11 -05001987 read_unlock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00001988 gi->hash++;
Robert Peterson7a0079d2007-04-17 11:37:11 -05001989 read_lock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00001990 gi->hb_list = NULL;
Robert Peterson7a0079d2007-04-17 11:37:11 -05001991 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1992 read_unlock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00001993 return 1;
Robert Peterson7a0079d2007-04-17 11:37:11 -05001994 }
Robert Peterson7c52b162007-03-16 10:26:37 +00001995 else
1996 continue;
1997 }
1998 if (!hlist_empty(gi->hb_list)) {
1999 gi->gl = list_entry(gi->hb_list->first,
2000 struct gfs2_glock,
2001 gl_list);
2002 }
2003 } else {
2004 if (gi->gl->gl_list.next == NULL) {
Robert Peterson7a0079d2007-04-17 11:37:11 -05002005 read_unlock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00002006 gi->hash++;
Robert Peterson7a0079d2007-04-17 11:37:11 -05002007 read_lock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00002008 gi->hb_list = NULL;
2009 continue;
2010 }
2011 gi->gl = list_entry(gi->gl->gl_list.next,
2012 struct gfs2_glock, gl_list);
2013 }
2014 if (gi->gl)
2015 break;
2016 }
Robert Peterson7a0079d2007-04-17 11:37:11 -05002017 read_unlock(gl_lock_addr(gi->hash));
Robert Peterson7c52b162007-03-16 10:26:37 +00002018 return 0;
2019}
2020
2021static void gfs2_glock_iter_free(struct glock_iter *gi)
2022{
2023 kfree(gi);
2024}
2025
2026static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2027{
2028 struct glock_iter *gi;
2029
2030 gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2031 if (!gi)
2032 return NULL;
2033
2034 gi->sdp = sdp;
2035 gi->hash = 0;
2036 gi->gl = NULL;
2037 gi->hb_list = NULL;
2038 gi->seq = NULL;
2039 memset(gi->string, 0, sizeof(gi->string));
2040
2041 if (gfs2_glock_iter_next(gi)) {
2042 gfs2_glock_iter_free(gi);
2043 return NULL;
2044 }
2045
2046 return gi;
2047}
2048
2049static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2050{
2051 struct glock_iter *gi;
2052 loff_t n = *pos;
2053
2054 gi = gfs2_glock_iter_init(file->private);
2055 if (!gi)
2056 return NULL;
2057
2058 while (n--) {
2059 if (gfs2_glock_iter_next(gi)) {
2060 gfs2_glock_iter_free(gi);
2061 return NULL;
2062 }
2063 }
2064
2065 return gi;
2066}
2067
2068static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2069 loff_t *pos)
2070{
2071 struct glock_iter *gi = iter_ptr;
2072
2073 (*pos)++;
2074
2075 if (gfs2_glock_iter_next(gi)) {
2076 gfs2_glock_iter_free(gi);
2077 return NULL;
2078 }
2079
2080 return gi;
2081}
2082
2083static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2084{
2085 /* nothing for now */
2086}
2087
2088static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2089{
2090 struct glock_iter *gi = iter_ptr;
2091
2092 gi->seq = file;
2093 dump_glock(gi, gi->gl);
2094
2095 return 0;
2096}
2097
2098static struct seq_operations gfs2_glock_seq_ops = {
2099 .start = gfs2_glock_seq_start,
2100 .next = gfs2_glock_seq_next,
2101 .stop = gfs2_glock_seq_stop,
2102 .show = gfs2_glock_seq_show,
2103};
2104
2105static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2106{
2107 struct seq_file *seq;
2108 int ret;
2109
2110 ret = seq_open(file, &gfs2_glock_seq_ops);
2111 if (ret)
2112 return ret;
2113
2114 seq = file->private_data;
2115 seq->private = inode->i_private;
2116
2117 return 0;
2118}
2119
2120static const struct file_operations gfs2_debug_fops = {
2121 .owner = THIS_MODULE,
2122 .open = gfs2_debugfs_open,
2123 .read = seq_read,
2124 .llseek = seq_lseek,
2125 .release = seq_release
2126};
2127
2128int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2129{
Robert Peterson5f882092007-04-18 11:41:11 -05002130 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2131 if (!sdp->debugfs_dir)
2132 return -ENOMEM;
2133 sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2134 S_IFREG | S_IRUGO,
2135 sdp->debugfs_dir, sdp,
2136 &gfs2_debug_fops);
2137 if (!sdp->debugfs_dentry_glocks)
Robert Peterson7c52b162007-03-16 10:26:37 +00002138 return -ENOMEM;
2139
2140 return 0;
2141}
2142
2143void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2144{
Robert Peterson5f882092007-04-18 11:41:11 -05002145 if (sdp && sdp->debugfs_dir) {
2146 if (sdp->debugfs_dentry_glocks) {
2147 debugfs_remove(sdp->debugfs_dentry_glocks);
2148 sdp->debugfs_dentry_glocks = NULL;
2149 }
2150 debugfs_remove(sdp->debugfs_dir);
2151 sdp->debugfs_dir = NULL;
2152 }
Robert Peterson7c52b162007-03-16 10:26:37 +00002153}
2154
2155int gfs2_register_debugfs(void)
2156{
2157 gfs2_root = debugfs_create_dir("gfs2", NULL);
2158 return gfs2_root ? 0 : -ENOMEM;
2159}
2160
2161void gfs2_unregister_debugfs(void)
2162{
2163 debugfs_remove(gfs2_root);
Robert Peterson5f882092007-04-18 11:41:11 -05002164 gfs2_root = NULL;
Robert Peterson7c52b162007-03-16 10:26:37 +00002165}
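
/*
 * Illustrative sketch: how the four debugfs entry points above pair up over
 * the lifetime of the module and of one mounted filesystem.  With debugfs
 * mounted in the usual place, the glock dump is then readable as
 * /sys/kernel/debug/gfs2/<locktable name>/glocks, one dump_glock() record per
 * glock.  The function below only illustrates the ordering; the real call
 * sites are the module init/exit and mount/unmount paths.
 */
static int example_debugfs_lifetime(struct gfs2_sbd *sdp)
{
	int error;

	error = gfs2_register_debugfs();	/* module init: /sys/kernel/debug/gfs2 */
	if (error)
		return error;

	error = gfs2_create_debugfs_file(sdp);	/* mount: <locktable name>/glocks */
	if (error)
		goto out;

	/* ... the "glocks" file can now be read at any time ... */

	gfs2_delete_debugfs_file(sdp);		/* unmount */
out:
	gfs2_unregister_debugfs();		/* module exit */
	return error;
}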