// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0; i < cookie->len; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}

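/* Compare two NLM cookies for exact, byte-wise equality. */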
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}


/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

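/*
 * Final kref release for a block: the caller (nlmsvc_release_block) has
 * already taken file->f_mutex via kref_put_mutex(), so unlink the block
 * from the file's list, drop the mutex, and free the associated call data.
 */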
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}

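/*
 * Server-side lock owners: each (host, svid/pid) pair the client presents
 * is tracked by a refcounted nlm_lockowner hanging off the nlm_host.
 * Lookup and creation are serialized by host->h_lock; the allocation in
 * nlmsvc_find_lockowner() is done with the lock dropped and the list is
 * re-checked afterwards to handle a concurrent insertion.
 */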
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

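/*
 * file_lock callbacks: keep the lockowner refcount in step with the VFS as
 * it copies and releases struct file_lock instances that carry our owner.
 */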
static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
	new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
}

static void nlmsvc_locks_release_private(struct file_lock *fl)
{
	nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
}

static const struct file_lock_operations nlmsvc_lock_ops = {
	.fl_copy_lock = nlmsvc_locks_copy_lock,
	.fl_release_private = nlmsvc_locks_release_private,
};

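/*
 * Attach the nlm_lockowner for (host, pid) to a server-side file_lock and
 * install nlmsvc_lock_ops so later copies and frees stay refcount-balanced.
 */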
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
	if (fl->fl_owner != NULL)
		fl->fl_ops = &nlmsvc_lock_ops;
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct nlm_block *block = NULL;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
		case 0:
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			/*
			 * If this is a blocking request for an
			 * already pending lock request then we need
			 * to put it back on lockd's block list
			 */
			if (wait)
				break;
			ret = nlm_lck_denied;
			goto out;
		case FILE_LOCK_DEFERRED:
			if (wait)
				break;
			/* Filesystem lock operation is in progress.
			   Add it to the queue waiting for callback. */
			ret = nlmsvc_defer_lock_rqst(rqstp, block);
			goto out;
		case -EDEADLK:
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int error;
	__be32 ret;
	struct nlm_lockowner *test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	error = vfs_test_lock(file->f_file, &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				locks_inode(file->f_file)->i_sb->s_id,
				locks_inode(file->f_file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		vfs_cancel_lock(block->b_file->f_file,
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For a SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int error;
	loff_t fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *call = data;
	struct nlm_block *block = call->a_block;
	unsigned long timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;
	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block *block;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	if (status == nlm_lck_denied_grace_period) {
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
	} else {
		/*
		 * Lock is now held by client, or has been rejected.
		 * In both cases, the block should be removed.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	return timeout;
}