// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
                                      long timeout,
                                      int flags)
                        __acquires(op->lock);
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
        __releases(op->lock);

/*
 * Walk the list of operations on the request queue and mark each one
 * as purged.
 * NOTE: This is called from the device close path after the client-core
 * has guaranteed that no new operations can appear on the list, since
 * the client-core is about to exit anyway.
 */
void purge_waiting_ops(void)
{
        struct orangefs_kernel_op_s *op, *tmp;

        spin_lock(&orangefs_request_list_lock);
        list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "pvfs2-client-core: purging op tag %llu %s\n",
                             llu(op->tag),
                             get_opname_string(op));
                set_op_state_purged(op);
                gossip_debug(GOSSIP_DEV_DEBUG,
                             "%s: op:%s: op_state:%d: process:%s:\n",
                             __func__,
                             get_opname_string(op),
                             op->op_state,
                             current->comm);
        }
        spin_unlock(&orangefs_request_list_lock);
}
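
/*
 * Illustrative sketch only (not part of this file): set_op_state_purged()
 * is provided by orangefs-kernel.h.  Conceptually it flags the op and
 * wakes any sleeper, roughly along these lines -- the real helper may
 * differ in detail:
 *
 *      static inline void set_op_state_purged(struct orangefs_kernel_op_s *op)
 *      {
 *              op->op_state |= OP_VFS_STATE_PURGED;
 *              complete(&op->waitq);
 *      }
 *
 * wait_for_matching_downcall() below then sees op_state_purged() and
 * turns the wakeup into -EAGAIN (retry) or -EIO.
 */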

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note: op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as the result of a
 * failure to service the operation.  If the caller wishes to distinguish,
 * op->state can be checked to see whether it was actually serviced.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
int service_operation(struct orangefs_kernel_op_s *op,
                      const char *op_name,
                      int flags)
{
        long timeout = MAX_SCHEDULE_TIMEOUT;
        int ret = 0;

        DEFINE_WAIT(wait_entry);

        op->upcall.tgid = current->tgid;
        op->upcall.pid = current->pid;

retry_servicing:
        op->downcall.status = 0;
        gossip_debug(GOSSIP_WAIT_DEBUG,
                     "%s: %s op:%p: process:%s: pid:%d:\n",
                     __func__,
                     op_name,
                     op,
                     current->comm,
                     current->pid);

        /*
         * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
         * acquiring the request_mutex because we're servicing a
         * high priority remount operation and the request_mutex is
         * already taken.
         */
        if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
                if (flags & ORANGEFS_OP_INTERRUPTIBLE)
                        ret = mutex_lock_interruptible(&orangefs_request_mutex);
                else
                        ret = mutex_lock_killable(&orangefs_request_mutex);
                /*
                 * check to see if we were interrupted while waiting for
                 * mutex
                 */
                if (ret < 0) {
                        op->downcall.status = ret;
                        gossip_debug(GOSSIP_WAIT_DEBUG,
                                     "%s: service_operation interrupted.\n",
                                     __func__);
                        return ret;
                }
        }

        /* queue up the operation */
        spin_lock(&orangefs_request_list_lock);
        spin_lock(&op->lock);
        set_op_state_waiting(op);
        gossip_debug(GOSSIP_DEV_DEBUG,
                     "%s: op:%s: op_state:%d: process:%s:\n",
                     __func__,
                     get_opname_string(op),
                     op->op_state,
                     current->comm);
        /* add high priority remount op to the front of the line. */
        if (flags & ORANGEFS_OP_PRIORITY)
                list_add(&op->list, &orangefs_request_list);
        else
                list_add_tail(&op->list, &orangefs_request_list);
        spin_unlock(&op->lock);
        wake_up_interruptible(&orangefs_request_list_waitq);
        if (!__is_daemon_in_service()) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "%s:client core is NOT in service.\n",
                             __func__);
                /*
                 * Don't wait for the userspace component to return if
                 * the filesystem is being umounted anyway.
                 */
                if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)
                        timeout = 0;
                else
                        timeout = op_timeout_secs * HZ;
        }
        spin_unlock(&orangefs_request_list_lock);

        if (!(flags & ORANGEFS_OP_NO_MUTEX))
                mutex_unlock(&orangefs_request_mutex);

        ret = wait_for_matching_downcall(op, timeout, flags);
        gossip_debug(GOSSIP_WAIT_DEBUG,
                     "%s: wait_for_matching_downcall returned %d for %p\n",
                     __func__,
                     ret,
                     op);

        /* got matching downcall; make sure status is in errno format */
        if (!ret) {
                spin_unlock(&op->lock);
                op->downcall.status =
                    orangefs_normalize_to_errno(op->downcall.status);
                ret = op->downcall.status;
                goto out;
        }

        /* failed to get matching downcall */
        if (ret == -ETIMEDOUT) {
                gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
                           __func__,
                           op_name);
        }

        /*
         * remove a waiting op from the request list or
         * remove an in-progress op from the in-progress list.
         */
        orangefs_clean_up_interrupted_operation(op);

        op->downcall.status = ret;
        /* retry if operation has not been serviced and if requested */
        if (ret == -EAGAIN) {
                op->attempts++;
                timeout = op_timeout_secs * HZ;
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "orangefs: tag %llu (%s)"
                             " -- operation to be retried (%d attempt)\n",
                             llu(op->tag),
                             op_name,
                             op->attempts);

                /*
                 * io ops (ops that use the shared memory buffer) have
                 * to be returned to their caller for a retry. Other ops
                 * can just be recycled here.
                 */
                if (!op->uses_shared_memory)
                        goto retry_servicing;
        }

out:
        gossip_debug(GOSSIP_WAIT_DEBUG,
                     "%s: %s returning: %d for %p.\n",
                     __func__,
                     op_name,
                     ret,
                     op);
        return ret;
}
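
/*
 * Illustrative caller pattern (a sketch, not code from this file): other
 * parts of the module typically allocate an op with op_alloc(), fill in
 * the upcall, call service_operation(), and release the op afterwards.
 * The exact request fields below are assumptions for illustration:
 *
 *      struct orangefs_kernel_op_s *new_op;
 *      int ret;
 *
 *      new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
 *      if (!new_op)
 *              return -ENOMEM;
 *      new_op->upcall.req.getattr.refn = orangefs_inode->refn;
 *      ret = service_operation(new_op, __func__,
 *                              get_interruptible_flag(inode));
 *      if (ret == 0)
 *              ;       // consume new_op->downcall.resp.getattr
 *      op_release(new_op);
 */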

/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
        u64 tag = op->tag;

        if (!op_state_in_progress(op))
                return false;

        op->slot_to_free = op->upcall.req.io.buf_index;
        memset(&op->upcall, 0, sizeof(op->upcall));
        memset(&op->downcall, 0, sizeof(op->downcall));
        op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
        op->upcall.req.cancel.op_tag = tag;
        op->downcall.type = ORANGEFS_VFS_OP_INVALID;
        op->downcall.status = -1;
        orangefs_new_tag(op);

        spin_lock(&orangefs_request_list_lock);
        /* orangefs_request_list_lock is enough of a barrier here */
        if (!__is_daemon_in_service()) {
                spin_unlock(&orangefs_request_list_lock);
                return false;
        }
        spin_lock(&op->lock);
        set_op_state_waiting(op);
        gossip_debug(GOSSIP_DEV_DEBUG,
                     "%s: op:%s: op_state:%d: process:%s:\n",
                     __func__,
                     get_opname_string(op),
                     op->op_state,
                     current->comm);
        list_add(&op->list, &orangefs_request_list);
        spin_unlock(&op->lock);
        spin_unlock(&orangefs_request_list_lock);

        gossip_debug(GOSSIP_WAIT_DEBUG,
                     "Attempting ORANGEFS operation cancellation of tag %llu\n",
                     llu(tag));
        return true;
}
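
/*
 * Illustrative use (a sketch, not code from this file): an I/O caller
 * whose service_operation() came back with an error while the op was
 * still in progress can queue a cancel upcall for client-core, roughly:
 *
 *      ret = service_operation(new_op, __func__,
 *                              ORANGEFS_OP_INTERRUPTIBLE);
 *      if (ret < 0 && orangefs_cancel_op_in_progress(new_op))
 *              ;       // cancel upcall for the old tag is now queued
 */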

/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
        __releases(op->lock)
{
        /*
         * handle interrupted cases depending on what state we were in when
         * the interruption is detected.
         *
         * Called with op->lock held.
         */

        /*
         * List manipulation code elsewhere will ignore ops that
         * have been given up upon.
         */
        op->op_state |= OP_VFS_STATE_GIVEN_UP;

        if (list_empty(&op->list)) {
                /* caught copying to/from daemon */
                BUG_ON(op_state_serviced(op));
                spin_unlock(&op->lock);
                wait_for_completion(&op->waitq);
        } else if (op_state_waiting(op)) {
                /*
                 * upcall hasn't been read; remove op from upcall request
                 * list.
                 */
                spin_unlock(&op->lock);
                spin_lock(&orangefs_request_list_lock);
                list_del_init(&op->list);
                spin_unlock(&orangefs_request_list_lock);
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "Interrupted: Removed op %p from request_list\n",
                             op);
        } else if (op_state_in_progress(op)) {
                /* op must be removed from the in progress htable */
                spin_unlock(&op->lock);
                spin_lock(&orangefs_htable_ops_in_progress_lock);
                list_del_init(&op->list);
                spin_unlock(&orangefs_htable_ops_in_progress_lock);
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "Interrupted: Removed op %p"
                             " from htable_ops_in_progress\n",
                             op);
        } else {
                spin_unlock(&op->lock);
                gossip_err("interrupted operation is in a weird state 0x%x\n",
                           op->op_state);
        }
        reinit_completion(&op->waitq);
}
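
/*
 * Summary of the dispositions above, derived from the code (comment
 * added for clarity only):
 *
 *      op->list empty (daemon mid-copy)  -> wait for op->waitq completion
 *      op waiting on the request list    -> unlink from orangefs_request_list
 *      op in progress                    -> unlink from htable_ops_in_progress
 *      anything else                     -> warn and just drop op->lock
 *
 * In every case the op ends up flagged OP_VFS_STATE_GIVEN_UP and its
 * completion is re-armed via reinit_completion().
 */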

/*
 * Sleeps on waitqueue waiting for matching downcall.
 * If client-core finishes servicing, then we are good to go.
 * Else if client-core exits, we get woken up here and retry with a timeout.
 *
 * When this call returns to the caller, the specified op will no
 * longer be in either the in_progress hash table or on the request list.
 *
 * Returns 0 on success and -errno on failure
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation since client-core seems to be exiting too often
 * or if we were interrupted.
 *
 * Returns with op->lock taken.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
                                      long timeout,
                                      int flags)
                        __acquires(op->lock)
{
        long n;
        int writeback = flags & ORANGEFS_OP_WRITEBACK,
            interruptible = flags & ORANGEFS_OP_INTERRUPTIBLE;

        /*
         * There's a "schedule_timeout" inside of these wait
         * primitives, during which the op is out of the hands of the
         * user process that needs something done and is being
         * manipulated by the client-core process.
         */
        if (writeback)
                n = wait_for_completion_io_timeout(&op->waitq, timeout);
        else if (!writeback && interruptible)
                n = wait_for_completion_interruptible_timeout(&op->waitq,
                                                              timeout);
        else /* !writeback && !interruptible but compiler complains */
                n = wait_for_completion_killable_timeout(&op->waitq, timeout);

        spin_lock(&op->lock);

        if (op_state_serviced(op))
                return 0;

        if (unlikely(n < 0)) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "%s: operation interrupted, tag %llu, %p\n",
                             __func__,
                             llu(op->tag),
                             op);
                return -EINTR;
        }
        if (op_state_purged(op)) {
                gossip_debug(GOSSIP_WAIT_DEBUG,
                             "%s: operation purged, tag %llu, %p, %d\n",
                             __func__,
                             llu(op->tag),
                             op,
                             op->attempts);
                return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
                         -EAGAIN :
                         -EIO;
        }
        /* must have timed out, then... */
        gossip_debug(GOSSIP_WAIT_DEBUG,
                     "%s: operation timed out, tag %llu, %p, %d\n",
                     __func__,
                     llu(op->tag),
                     op,
                     op->attempts);
        return -ETIMEDOUT;
}
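
/*
 * Note on the branches above (added for clarity; based on the standard
 * completion API semantics): wait_for_completion_interruptible_timeout()
 * and wait_for_completion_killable_timeout() return a negative value if
 * the sleep was broken by a (fatal) signal, 0 if the timeout expired, and
 * the remaining jiffies (> 0) if the completion fired;
 * wait_for_completion_io_timeout() never returns a negative value.  The
 * checks above map that onto op state:
 *
 *      op serviced                         -> 0
 *      n < 0 (signal)                      -> -EINTR
 *      op purged, attempts < retry limit   -> -EAGAIN (caller may requeue)
 *      op purged, too many attempts        -> -EIO
 *      otherwise (n == 0, timed out)       -> -ETIMEDOUT
 */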