// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
#include <linux/exportfs.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

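/*
 * NLM versions prior to v4 have no distinct deadlock status code, so a
 * detected deadlock is reported to those older clients as a plain denial.
 */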
#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
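/*
 * Protects nlm_blocked.  The list is kept sorted by b_when (earliest retry
 * time first), with NLM_NEVER entries collecting at the tail.
 */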
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host	= host;
	block->b_file	= file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags	= RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

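/*
 * Final kref release for a block.  Called via kref_put_mutex() from
 * nlmsvc_release_block(), so the file's f_mutex is held on entry and is
 * dropped here once the block has been unhooked from the file's list.
 */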
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

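/*
 * Look up (or create) the lockowner for a given (host, pid) pair.  The
 * allocation is done with h_lock dropped, so the list is re-checked after
 * reacquiring the lock in case another thread inserted the same owner.
 */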
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

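	/* Owner handles larger than the inline a_owner buffer get their own
	 * allocation; failure here makes block creation fail. */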
	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

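	/* Defer the svc request so it can be revisited once the filesystem
	 * completes the lock; if deferral is not possible the default
	 * nlm_lck_denied_nolocks is returned and the client must retry. */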
	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct inode *inode = nlmsvc_file_inode(file);
#endif
	struct nlm_block *block = NULL;
	int error;
	int mode;
	int async_block = 0;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
			inode->i_sb->s_id, inode->i_ino,
			lock->fl.fl_type, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end,
			wait);

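	/* Filesystems that provide their own ->lock method handle blocking
	 * internally, so don't sleep here: remember whether the client asked
	 * to wait (async_block) and report nlm_lck_blocked on contention,
	 * leaving the grant to the lm_grant callback. */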
	if (nlmsvc_file_file(file)->f_op->lock) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
						block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

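	/* Only reclaim requests are allowed while the grace period is in
	 * effect, and reclaims are only valid during it. */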
	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		/*
		 * If this is a blocking request for an
		 * already pending lock request then we need
		 * to put it back on lockd's block list
		 */
		if (wait)
			break;
		ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress;
		 * add it to the queue waiting for callback */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int error;
	int mode;
	__be32 ret;
	struct nlm_lockowner *test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
			nlmsvc_file_inode(file)->i_sb->s_id,
			nlmsvc_file_inode(file)->i_ino,
			lock->fl.fl_type,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
			nlmsvc_file_inode(file)->i_sb->s_id,
			nlmsvc_file_inode(file)->i_ino,
			lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
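	/* The lock may have been taken through either the read or the write
	 * open of the file, so issue the unlock against whichever of the two
	 * struct files exist. */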
J. Bruce Fields | 7f024fc | 2021-08-23 16:44:00 -0400 | [diff] [blame] | 670 | if (file->f_file[O_RDONLY]) |
| 671 | error = vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, |
| 672 | &lock->fl, NULL); |
| 673 | if (file->f_file[O_WRONLY]) |
| 674 | error = vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, |
| 675 | &lock->fl, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 676 | |
| 677 | return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; |
| 678 | } |
| 679 | |
| 680 | /* |
| 681 | * Cancel a previously blocked request. |
| 682 | * |
| 683 | * A cancel request always overrides any grant that may currently |
| 684 | * be in progress. |
| 685 | * The calling procedure must check whether the file can be closed. |
| 686 | */ |
Al Viro | 52921e0 | 2006-10-19 23:28:46 -0700 | [diff] [blame] | 687 | __be32 |
Stanislav Kinsbursky | 5ccb006 | 2012-07-25 16:57:22 +0400 | [diff] [blame] | 688 | nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 689 | { |
| 690 | struct nlm_block *block; |
J. Bruce Fields | 64a318e | 2006-01-03 09:55:46 +0100 | [diff] [blame] | 691 | int status = 0; |
J. Bruce Fields | 7f024fc | 2021-08-23 16:44:00 -0400 | [diff] [blame] | 692 | int mode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 693 | |
| 694 | dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", |
J. Bruce Fields | a81041b | 2021-08-23 11:26:39 -0400 | [diff] [blame] | 695 | nlmsvc_file_inode(file)->i_sb->s_id, |
| 696 | nlmsvc_file_inode(file)->i_ino, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 697 | lock->fl.fl_pid, |
| 698 | (long long)lock->fl.fl_start, |
| 699 | (long long)lock->fl.fl_end); |
| 700 | |
Stanislav Kinsbursky | 5ccb006 | 2012-07-25 16:57:22 +0400 | [diff] [blame] | 701 | if (locks_in_grace(net)) |
J. Bruce Fields | b2b5028 | 2008-02-06 13:59:23 -0500 | [diff] [blame] | 702 | return nlm_lck_denied_grace_period; |
| 703 | |
Neil Brown | 89e63ef | 2006-10-04 02:16:06 -0700 | [diff] [blame] | 704 | mutex_lock(&file->f_mutex); |
Trond Myklebust | d9f6eb75 | 2006-03-20 13:44:47 -0500 | [diff] [blame] | 705 | block = nlmsvc_lookup_block(file, lock); |
Neil Brown | 89e63ef | 2006-10-04 02:16:06 -0700 | [diff] [blame] | 706 | mutex_unlock(&file->f_mutex); |
Trond Myklebust | d9f6eb75 | 2006-03-20 13:44:47 -0500 | [diff] [blame] | 707 | if (block != NULL) { |
J. Bruce Fields | 7f024fc | 2021-08-23 16:44:00 -0400 | [diff] [blame] | 708 | mode = lock_to_openmode(&lock->fl); |
| 709 | vfs_cancel_lock(block->b_file->f_file[mode], |
Marc Eshel | 1a8322b | 2006-11-28 16:27:06 -0500 | [diff] [blame] | 710 | &block->b_call->a_args.lock.fl); |
Trond Myklebust | 6849c0c | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 711 | status = nlmsvc_unlink_block(block); |
| 712 | nlmsvc_release_block(block); |
| 713 | } |
J. Bruce Fields | 64a318e | 2006-01-03 09:55:46 +0100 | [diff] [blame] | 714 | return status ? nlm_lck_denied : nlm_granted; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 715 | } |
| 716 | |
| 717 | /* |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 718 | * This is a callback from the filesystem for VFS file lock requests. |
J. Bruce Fields | 8fb47a4 | 2011-07-20 20:21:59 -0400 | [diff] [blame] | 719 | * It will be used if lm_grant is defined and the filesystem can not |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 720 | * respond to the request immediately. |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 721 | * For SETLK or SETLKW request it will get the local posix lock. |
| 722 | * In all cases it will move the block to the head of nlm_blocked q where |
| 723 | * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the |
| 724 | * deferred rpc for GETLK and SETLK. |
| 725 | */ |
| 726 | static void |
Joe Perches | d0449b9 | 2014-08-22 10:18:42 -0400 | [diff] [blame] | 727 | nlmsvc_update_deferred_block(struct nlm_block *block, int result) |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 728 | { |
| 729 | block->b_flags |= B_GOT_CALLBACK; |
| 730 | if (result == 0) |
| 731 | block->b_granted = 1; |
| 732 | else |
| 733 | block->b_flags |= B_TIMED_OUT; |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 734 | } |
| 735 | |
Joe Perches | d0449b9 | 2014-08-22 10:18:42 -0400 | [diff] [blame] | 736 | static int nlmsvc_grant_deferred(struct file_lock *fl, int result) |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 737 | { |
| 738 | struct nlm_block *block; |
| 739 | int rc = -ENOENT; |
| 740 | |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 741 | spin_lock(&nlm_blocked_lock); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 742 | list_for_each_entry(block, &nlm_blocked, b_list) { |
| 743 | if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { |
| 744 | dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n", |
| 745 | block, block->b_flags); |
| 746 | if (block->b_flags & B_QUEUED) { |
| 747 | if (block->b_flags & B_TIMED_OUT) { |
| 748 | rc = -ENOLCK; |
| 749 | break; |
| 750 | } |
Joe Perches | d0449b9 | 2014-08-22 10:18:42 -0400 | [diff] [blame] | 751 | nlmsvc_update_deferred_block(block, result); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 752 | } else if (result == 0) |
| 753 | block->b_granted = 1; |
| 754 | |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 755 | nlmsvc_insert_block_locked(block, 0); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 756 | svc_wake_up(block->b_daemon); |
| 757 | rc = 0; |
| 758 | break; |
| 759 | } |
| 760 | } |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 761 | spin_unlock(&nlm_blocked_lock); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 762 | if (rc == -ENOENT) |
| 763 | printk(KERN_WARNING "lockd: grant for unknown block\n"); |
| 764 | return rc; |
| 765 | } |
| 766 | |
| 767 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 | * Unblock a blocked lock request. This is a callback invoked from the |
| 769 | * VFS layer when a lock on which we blocked is removed. |
| 770 | * |
| 771 | * This function doesn't grant the blocked lock instantly, but rather moves |
| 772 | * the block to the head of nlm_blocked where it can be picked up by lockd. |
| 773 | */ |
| 774 | static void |
| 775 | nlmsvc_notify_blocked(struct file_lock *fl) |
| 776 | { |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 777 | struct nlm_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 778 | |
| 779 | dprintk("lockd: VFS unblock notification for block %p\n", fl); |
J. Bruce Fields | a282a1f | 2010-10-26 18:25:30 -0400 | [diff] [blame] | 780 | spin_lock(&nlm_blocked_lock); |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 781 | list_for_each_entry(block, &nlm_blocked, b_list) { |
Trond Myklebust | 9273723 | 2006-03-20 13:44:45 -0500 | [diff] [blame] | 782 | if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { |
J. Bruce Fields | a282a1f | 2010-10-26 18:25:30 -0400 | [diff] [blame] | 783 | nlmsvc_insert_block_locked(block, 0); |
| 784 | spin_unlock(&nlm_blocked_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 785 | svc_wake_up(block->b_daemon); |
| 786 | return; |
| 787 | } |
| 788 | } |
J. Bruce Fields | a282a1f | 2010-10-26 18:25:30 -0400 | [diff] [blame] | 789 | spin_unlock(&nlm_blocked_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 | printk(KERN_WARNING "lockd: notification for unknown block!\n"); |
| 791 | } |
| 792 | |
J. Bruce Fields | 7de875b | 2021-08-20 17:01:59 -0400 | [diff] [blame] | 793 | static fl_owner_t nlmsvc_get_owner(fl_owner_t owner) |
| 794 | { |
| 795 | return nlmsvc_get_lockowner(owner); |
| 796 | } |
| 797 | |
| 798 | static void nlmsvc_put_owner(fl_owner_t owner) |
| 799 | { |
| 800 | nlmsvc_put_lockowner(owner); |
| 801 | } |
| 802 | |
Alexey Dobriyan | 7b02196 | 2009-09-21 17:01:12 -0700 | [diff] [blame] | 803 | const struct lock_manager_operations nlmsvc_lock_operations = { |
J. Bruce Fields | 8fb47a4 | 2011-07-20 20:21:59 -0400 | [diff] [blame] | 804 | .lm_notify = nlmsvc_notify_blocked, |
| 805 | .lm_grant = nlmsvc_grant_deferred, |
J. Bruce Fields | 7de875b | 2021-08-20 17:01:59 -0400 | [diff] [blame] | 806 | .lm_get_owner = nlmsvc_get_owner, |
| 807 | .lm_put_owner = nlmsvc_put_owner, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 808 | }; |
| 809 | |
| 810 | /* |
| 811 | * Try to claim a lock that was previously blocked. |
| 812 | * |
| 813 | * Note that we use both the RPC_GRANTED_MSG call _and_ an async |
| 814 | * RPC thread when notifying the client. This seems like overkill... |
| 815 | * Here's why: |
| 816 | * - we don't want to use a synchronous RPC thread, otherwise |
| 817 | * we might find ourselves hanging on a dead portmapper. |
| 818 | * - Some lockd implementations (e.g. HP) don't react to |
| 819 | * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls. |
| 820 | */ |
| 821 | static void |
| 822 | nlmsvc_grant_blocked(struct nlm_block *block) |
| 823 | { |
| 824 | struct nlm_file *file = block->b_file; |
Trond Myklebust | 9273723 | 2006-03-20 13:44:45 -0500 | [diff] [blame] | 825 | struct nlm_lock *lock = &block->b_call->a_args.lock; |
J. Bruce Fields | 7f024fc | 2021-08-23 16:44:00 -0400 | [diff] [blame] | 826 | int mode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 827 | int error; |
NeilBrown | 2ec197d | 2014-02-07 17:10:26 +1100 | [diff] [blame] | 828 | loff_t fl_start, fl_end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 829 | |
| 830 | dprintk("lockd: grant blocked lock %p\n", block); |
| 831 | |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 832 | kref_get(&block->b_count); |
| 833 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 834 | /* Unlink block request from list */ |
Trond Myklebust | 6849c0c | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 835 | nlmsvc_unlink_block(block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 836 | |
| 837 | /* If b_granted is true this means we've been here before. |
| 838 | * Just retry the grant callback, possibly refreshing the RPC |
| 839 | * binding */ |
| 840 | if (block->b_granted) { |
| 841 | nlm_rebind_host(block->b_host); |
| 842 | goto callback; |
| 843 | } |
| 844 | |
| 845 | /* Try the lock operation again */ |
NeilBrown | 2ec197d | 2014-02-07 17:10:26 +1100 | [diff] [blame] | 846 | /* vfs_lock_file() can mangle fl_start and fl_end, but we need |
| 847 | * them unchanged for the GRANT_MSG |
| 848 | */ |
Trond Myklebust | 09c7938 | 2006-03-20 13:44:38 -0500 | [diff] [blame] | 849 | lock->fl.fl_flags |= FL_SLEEP; |
NeilBrown | 2ec197d | 2014-02-07 17:10:26 +1100 | [diff] [blame] | 850 | fl_start = lock->fl.fl_start; |
| 851 | fl_end = lock->fl.fl_end; |
J. Bruce Fields | 7f024fc | 2021-08-23 16:44:00 -0400 | [diff] [blame] | 852 | mode = lock_to_openmode(&lock->fl); |
| 853 | error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL); |
Trond Myklebust | 09c7938 | 2006-03-20 13:44:38 -0500 | [diff] [blame] | 854 | lock->fl.fl_flags &= ~FL_SLEEP; |
NeilBrown | 2ec197d | 2014-02-07 17:10:26 +1100 | [diff] [blame] | 855 | lock->fl.fl_start = fl_start; |
| 856 | lock->fl.fl_end = fl_end; |
Trond Myklebust | 09c7938 | 2006-03-20 13:44:38 -0500 | [diff] [blame] | 857 | |
Andy Adamson | 5de0e50 | 2006-03-20 13:44:25 -0500 | [diff] [blame] | 858 | switch (error) { |
| 859 | case 0: |
| 860 | break; |
Miklos Szeredi | bde74e4 | 2008-07-25 01:48:57 -0700 | [diff] [blame] | 861 | case FILE_LOCK_DEFERRED: |
Marc Eshel | 1a8322b | 2006-11-28 16:27:06 -0500 | [diff] [blame] | 862 | dprintk("lockd: lock still blocked error %d\n", error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | nlmsvc_insert_block(block, NLM_NEVER); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 864 | nlmsvc_release_block(block); |
Trond Myklebust | d9f6eb75 | 2006-03-20 13:44:47 -0500 | [diff] [blame] | 865 | return; |
Andy Adamson | 5de0e50 | 2006-03-20 13:44:25 -0500 | [diff] [blame] | 866 | default: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", |
Harvey Harrison | 8e24eea | 2008-04-30 00:55:09 -0700 | [diff] [blame] | 868 | -error, __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 | nlmsvc_insert_block(block, 10 * HZ); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 870 | nlmsvc_release_block(block); |
Trond Myklebust | d9f6eb75 | 2006-03-20 13:44:47 -0500 | [diff] [blame] | 871 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | } |
| 873 | |
| 874 | callback: |
| 875 | /* Lock was granted by VFS. */ |
| 876 | dprintk("lockd: GRANTing blocked lock.\n"); |
| 877 | block->b_granted = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 | |
Jeff Layton | 9706501 | 2008-02-06 11:34:12 -0500 | [diff] [blame] | 879 | /* keep block on the list, but don't reattempt until the RPC |
| 880 | * completes or the submission fails |
| 881 | */ |
| 882 | nlmsvc_insert_block(block, NLM_NEVER); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | |
Jeff Layton | 9706501 | 2008-02-06 11:34:12 -0500 | [diff] [blame] | 884 | /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked |
| 885 | * will queue up a new one if this one times out |
| 886 | */ |
| 887 | error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, |
| 888 | &nlmsvc_grant_ops); |
| 889 | |
| 890 | /* RPC submission failed, wait a bit and retry */ |
| 891 | if (error < 0) |
| 892 | nlmsvc_insert_block(block, 10 * HZ); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | } |
| 894 | |
| 895 | /* |
| 896 | * This is the callback from the RPC layer when the NLM_GRANTED_MSG |
| 897 | * RPC call has succeeded or timed out. |
| 898 | * Like all RPC callbacks, it is invoked by the rpciod process, so it |
| 899 | * better not sleep. Therefore, we put the blocked lock on the nlm_blocked |
| 900 | * chain once more in order to have it removed by lockd itself (which can |
| 901 | * then sleep on the file semaphore without disrupting e.g. the nfs client). |
| 902 | */ |
Trond Myklebust | 963d8fe | 2006-01-03 09:55:04 +0100 | [diff] [blame] | 903 | static void nlmsvc_grant_callback(struct rpc_task *task, void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 904 | { |
Trond Myklebust | 963d8fe | 2006-01-03 09:55:04 +0100 | [diff] [blame] | 905 | struct nlm_rqst *call = data; |
Trond Myklebust | 9273723 | 2006-03-20 13:44:45 -0500 | [diff] [blame] | 906 | struct nlm_block *block = call->a_block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 907 | unsigned long timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | |
| 909 | dprintk("lockd: GRANT_MSG RPC callback\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 910 | |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 911 | spin_lock(&nlm_blocked_lock); |
Jeff Layton | c64e80d | 2008-02-06 11:34:13 -0500 | [diff] [blame] | 912 | /* if the block is not on a list at this point then it has |
| 913 | * been invalidated. Don't try to requeue it. |
| 914 | * |
| 915 | * FIXME: it's possible that the block is removed from the list |
| 916 | * after this check but before the nlmsvc_insert_block. In that |
| 917 | * case it will be added back. Perhaps we need better locking |
| 918 | * for nlm_blocked? |
| 919 | */ |
| 920 | if (list_empty(&block->b_list)) |
Trond Myklebust | a86dc49 | 2008-06-11 13:37:09 -0400 | [diff] [blame] | 921 | goto out; |
Jeff Layton | c64e80d | 2008-02-06 11:34:13 -0500 | [diff] [blame] | 922 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | /* Technically, we should down the file semaphore here. Since we |
| 924 | * only move the block towards the head of the queue, however, |
| 925 | * no harm is done. */ |
| 926 | if (task->tk_status < 0) { |
| 927 | /* RPC error: Re-insert for retransmission */ |
| 928 | timeout = 10 * HZ; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | } else { |
| 930 | /* Call was successful, now wait for client callback */ |
| 931 | timeout = 60 * HZ; |
| 932 | } |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 933 | nlmsvc_insert_block_locked(block, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 | svc_wake_up(block->b_daemon); |
Trond Myklebust | a86dc49 | 2008-06-11 13:37:09 -0400 | [diff] [blame] | 935 | out: |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 936 | spin_unlock(&nlm_blocked_lock); |
Trond Myklebust | 5e1abf8 | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 937 | } |
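/*
 * Illustrative sketch (not part of lockd): how the relative timeouts chosen
 * above relate to the absolute b_when checked by nlmsvc_retry_blocked().
 * This assumes nlmsvc_insert_block() records roughly jiffies + timeout
 * (NLM_NEVER excepted); time_until_retry() is a hypothetical helper.
 */
static unsigned long time_until_retry(const struct nlm_block *block)
{
	if (block->b_when == NLM_NEVER)
		return MAX_SCHEDULE_TIMEOUT;	/* parked until woken */
	if (time_after(block->b_when, jiffies))
		return block->b_when - jiffies;	/* wrap-safe jiffies comparison */
	return 0;				/* due for retry now */
}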
| 938 | |
Bryan Schumaker | f904be9 | 2010-09-21 16:38:12 -0400 | [diff] [blame] | 939 | /* |
| 940 | * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an |
| 941 | * .rpc_release rpc_call_op. |
| 942 | */ |
Adrian Bunk | ec535ce | 2006-04-18 13:21:50 -0400 | [diff] [blame] | 943 | static void nlmsvc_grant_release(void *data) |
Trond Myklebust | 5e1abf8 | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 944 | { |
Trond Myklebust | 6041b79 | 2006-03-20 13:44:45 -0500 | [diff] [blame] | 945 | struct nlm_rqst *call = data; |
Trond Myklebust | 6041b79 | 2006-03-20 13:44:45 -0500 | [diff] [blame] | 946 | nlmsvc_release_block(call->a_block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 947 | } |
| 948 | |
Trond Myklebust | 963d8fe | 2006-01-03 09:55:04 +0100 | [diff] [blame] | 949 | static const struct rpc_call_ops nlmsvc_grant_ops = { |
| 950 | .rpc_call_done = nlmsvc_grant_callback, |
Trond Myklebust | 5e1abf8 | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 951 | .rpc_release = nlmsvc_grant_release, |
Trond Myklebust | 963d8fe | 2006-01-03 09:55:04 +0100 | [diff] [blame] | 952 | }; |
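/*
 * Illustrative sketch (not part of lockd): the contract any similar ops
 * table has to honour.  my_done(), my_release() and my_async_ops are
 * hypothetical names.
 */
static void my_done(struct rpc_task *task, void *data)
{
	/* Runs in rpciod context, so it must not sleep; inspect
	 * task->tk_status and hand the real work back to a daemon. */
}

static void my_release(void *data)
{
	/* Drop the references taken when the call was queued.  As the
	 * FIXME above notes, sleeping locks are not allowed here either. */
}

static const struct rpc_call_ops my_async_ops = {
	.rpc_call_done	= my_done,
	.rpc_release	= my_release,
};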
| 953 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 | /* |
| 955 | * We received a GRANT_RES callback. Try to find the corresponding |
| 956 | * block. |
| 957 | */ |
| 958 | void |
Al Viro | e8c5c04 | 2006-12-13 00:35:03 -0800 | [diff] [blame] | 959 | nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | { |
| 961 | struct nlm_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | |
Olaf Kirch | 39be450 | 2006-10-04 02:16:03 -0700 | [diff] [blame] | 963 | dprintk("grant_reply: looking for cookie %x, s=%d\n", |
| 964 | *(unsigned int *)(cookie->data), status); |
| 965 | if (!(block = nlmsvc_find_block(cookie))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 966 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | |
Colin Ian King | e56efe9 | 2017-04-08 18:09:59 +0100 | [diff] [blame] | 968 | if (status == nlm_lck_denied_grace_period) { |
| 969 | /* Try again in a couple of seconds */ |
| 970 | nlmsvc_insert_block(block, 10 * HZ); |
| 971 | } else { |
| 972 | /* |
| 973 | * Lock is now held by client, or has been rejected. |
| 974 | * In both cases, the block should be removed. |
| 975 | */ |
| 976 | nlmsvc_unlink_block(block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | } |
Trond Myklebust | 6849c0c | 2006-03-20 13:44:39 -0500 | [diff] [blame] | 978 | nlmsvc_release_block(block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | } |
| 980 | |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 981 | /* Helper function to handle retry of a deferred block. |
| 982 | * If it is a blocking lock, call grant_blocked. |
| 983 | * For a non-blocking lock or test lock, revisit the request. |
| 984 | */ |
| 985 | static void |
| 986 | retry_deferred_block(struct nlm_block *block) |
| 987 | { |
| 988 | if (!(block->b_flags & B_GOT_CALLBACK)) |
| 989 | block->b_flags |= B_TIMED_OUT; |
| 990 | nlmsvc_insert_block(block, NLM_TIMEOUT); |
| 991 | dprintk("revisit block %p flags %d\n", block, block->b_flags); |
| 992 | if (block->b_deferred_req) { |
| 993 | block->b_deferred_req->revisit(block->b_deferred_req, 0); |
| 994 | block->b_deferred_req = NULL; |
| 995 | } |
| 996 | } |
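/*
 * Illustrative sketch (not part of lockd): the flag test above written as a
 * predicate.  deferred_call_timed_out() is a hypothetical helper.
 */
static bool deferred_call_timed_out(const struct nlm_block *block)
{
	/* No B_GOT_CALLBACK by the time lockd retries the block means the
	 * filesystem never answered; the request is revisited as timed out. */
	return !(block->b_flags & B_GOT_CALLBACK);
}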
| 997 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | /* |
| 999 | * Retry all blocked locks that have been notified. This is where lockd |
| 1000 | * picks up locks that can be granted, or grant notifications that must |
| 1001 | * be retransmitted. |
| 1002 | */ |
| 1003 | unsigned long |
| 1004 | nlmsvc_retry_blocked(void) |
| 1005 | { |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1006 | unsigned long timeout = MAX_SCHEDULE_TIMEOUT; |
| 1007 | struct nlm_block *block; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | |
David Jeffery | 1c327d9 | 2013-07-10 13:19:50 -0400 | [diff] [blame] | 1009 | spin_lock(&nlm_blocked_lock); |
Jeff Layton | d751a7c | 2008-02-07 16:34:55 -0500 | [diff] [blame] | 1010 | while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1011 | block = list_entry(nlm_blocked.next, struct nlm_block, b_list); |
| 1012 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | if (block->b_when == NLM_NEVER) |
| 1014 | break; |
J. Bruce Fields | 6d7bbbb | 2008-07-15 14:38:32 -0400 | [diff] [blame] | 1015 | if (time_after(block->b_when, jiffies)) { |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1016 | timeout = block->b_when - jiffies; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | break; |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1018 | } |
David Jeffery | 1c327d9 | 2013-07-10 13:19:50 -0400 | [diff] [blame] | 1019 | spin_unlock(&nlm_blocked_lock); |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1020 | |
J. Bruce Fields | f3d43c7 | 2006-08-03 15:07:47 -0400 | [diff] [blame] | 1021 | dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", |
| 1022 | block, block->b_when); |
Marc Eshel | 0e4ac9d | 2006-11-28 16:26:51 -0500 | [diff] [blame] | 1023 | if (block->b_flags & B_QUEUED) { |
| 1024 | dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n", |
| 1025 | block, block->b_granted, block->b_flags); |
| 1026 | retry_deferred_block(block); |
| 1027 | } else |
| 1028 | nlmsvc_grant_blocked(block); |
David Jeffery | 1c327d9 | 2013-07-10 13:19:50 -0400 | [diff] [blame] | 1029 | spin_lock(&nlm_blocked_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | } |
David Jeffery | 1c327d9 | 2013-07-10 13:19:50 -0400 | [diff] [blame] | 1031 | spin_unlock(&nlm_blocked_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | |
Olaf Kirch | 68a2d76 | 2006-10-04 02:15:57 -0700 | [diff] [blame] | 1033 | return timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 | } |
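/*
 * Illustrative sketch (not part of lockd): one way a polling loop could
 * consume the value returned above.  lockd_poll_once() is hypothetical; the
 * real lockd main loop instead uses this timeout to bound its RPC receive
 * wait, so svc_wake_up() can cut the sleep short.
 */
static void lockd_poll_once(void)
{
	unsigned long timeout;

	/* Service whatever is due and learn when the next block comes due. */
	timeout = nlmsvc_retry_blocked();

	/* Sleep for at most that long before polling again. */
	schedule_timeout_interruptible(timeout);
}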