/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */
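/*
 * Rough flow, as implemented below: service_operation() queues an op on
 * orangefs_request_list and wakes up the client-core daemon, which reads
 * the upcall through the orangefs device and eventually writes back a
 * matching downcall. The submitting process sleeps in
 * wait_for_matching_downcall() (or wait_for_cancellation_downcall() for
 * I/O cancellations) until the op is serviced, purged, interrupted by a
 * signal, or timed out.
 */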

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *);
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);

/*
 * Walk the list of operations sitting on the request queue and mark each
 * one as purged.
 * NOTE: This is called from the device close path after the client-core
 * has guaranteed that no new operations can appear on the list, since the
 * client-core is about to exit anyway.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		spin_lock(&op->lock);
		set_op_state_purged(op);
		spin_unlock(&op->lock);
	}
	spin_unlock(&orangefs_request_list_lock);
}

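/*
 * Mark the op as waiting, add it to the tail of the request list, and wake
 * up anyone sleeping on orangefs_request_list_waitq (the client-core's
 * device reader) so the new upcall gets picked up.
 */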
static inline void
add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

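/*
 * Same as add_op_to_request_list(), except the op is added at the head of
 * the request list so the client-core sees it ahead of ordinary requests.
 */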
static inline
void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);

	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note: op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a
 * failure to service the operation. If the caller wishes to distinguish,
 * op->state can be checked to see whether the op was actually serviced.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
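/*
 * Illustrative call pattern, loosely modeled on the getattr path (a
 * sketch, not a verbatim caller; the op type, upcall fields, and flags
 * below are examples only):
 *
 *	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
 *	if (!new_op)
 *		return -ENOMEM;
 *	new_op->upcall.req.getattr.refn = orangefs_inode->refn;
 *	ret = service_operation(new_op, __func__,
 *				get_interruptible_flag(inode));
 *	if (ret == 0)
 *		...consume new_op->downcall.resp.getattr...;
 *	op_release(new_op);
 */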
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	/* flags to modify behavior */
	sigset_t orig_sigset;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation: %s %p\n",
		     op_name,
		     op);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: operation posted by process: %s, pid: %i\n",
		     current->comm,
		     current->pid);

	/* mask out signals if this operation is not to be interrupted */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_block_signals(&orig_sigset);

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
		ret = mutex_lock_interruptible(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the request mutex
		 */
		if (ret < 0) {
			if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
				orangefs_set_signals(&orig_sigset);
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "orangefs: service_operation interrupted.\n");
			return ret;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:About to call is_daemon_in_service().\n",
		     __func__);

	if (is_daemon_in_service() < 0) {
		/*
		 * By incrementing the per-operation attempt counter, we
		 * directly go into the timeout logic while waiting for
		 * the matching downcall to be read.
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service(%d).\n",
			     __func__,
			     is_daemon_in_service());
		op->attempts++;
	}

	/* queue up the operation */
	if (flags & ORANGEFS_OP_PRIORITY) {
		add_priority_op_to_request_list(op);
	} else {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call add_op_to_request_list().\n",
			     __func__);
		add_op_to_request_list(op);
	}

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
		mutex_unlock(&request_mutex);

	/*
	 * If we are asked to service an asynchronous operation from
	 * the VFS perspective, we are done.
	 */
	if (flags & ORANGEFS_OP_ASYNC)
		return 0;

	if (flags & ORANGEFS_OP_CANCELLATION) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:"
			     "About to call wait_for_cancellation_downcall.\n",
			     __func__);
		ret = wait_for_cancellation_downcall(op);
	} else {
		ret = wait_for_matching_downcall(op);
	}

	if (ret < 0) {
		/* failed to get matching downcall */
		if (ret == -ETIMEDOUT) {
			gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
				   op_name);
		}
		op->downcall.status = ret;
	} else {
		/* got matching downcall; make sure status is in errno format */
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
	}

	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_set_signals(&orig_sigset);

	BUG_ON(ret != op->downcall.status);
	/* retry if operation has not been serviced and if requested */
	if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s)"
			     " -- operation to be retried (%d attempt)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts + 1);

		if (!op->uses_shared_memory)
			/*
			 * this operation doesn't use the shared memory
			 * system
			 */
			goto retry_servicing;

		/* op uses shared memory */
		if (orangefs_get_bufmap_init() == 0) {
			/*
			 * This operation uses the shared memory system AND
			 * the system is not yet ready. This situation occurs
			 * when the client-core is restarted AND there were
			 * operations waiting to be processed or already
			 * in progress.
			 */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "uses_shared_memory is true.\n");
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Client core in-service status(%d).\n",
				     is_daemon_in_service());
			gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
				     orangefs_get_bufmap_init());
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "operation's status is 0x%0x.\n",
				     op->op_state);

			/*
			 * let the process sleep for a few seconds so the
			 * shared memory system can be initialized.
			 */
			prepare_to_wait(&orangefs_bufmap_init_waitq,
					&wait_entry,
					TASK_INTERRUPTIBLE);

			/*
			 * Wait for orangefs_bufmap_initialize() to wake me up
			 * within the allotted time.
			 */
			ret = schedule_timeout(MSECS_TO_JIFFIES
				(1000 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS));

			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Value returned from schedule_timeout:"
				     "%d.\n",
				     ret);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Is shared memory available? (%d).\n",
				     orangefs_get_bufmap_init());

			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);

			if (orangefs_get_bufmap_init() == 0) {
				gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
					   __func__,
					   ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
					   get_opname_string(op));
				return -EIO;
			}

			/*
			 * Return to the calling function and re-populate a
			 * shared memory buffer.
			 */
			return -EAGAIN;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}

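/*
 * Called when we have given up on an op (signal, timeout, or purge): mark
 * it OP_VFS_STATE_GIVEN_UP and unhook it from the request list or the
 * in-progress htable, depending on how far it got.
 */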
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * Handle interrupted cases depending on what state we were in when
	 * the interruption was detected. There is a coarse-grained lock
	 * across the operation.
	 *
	 * NOTE: be sure not to reverse lock ordering by locking an op lock
	 * while holding the request_list lock. Here, we first lock the op
	 * and then lock the appropriate list.
	 */
	if (!op) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op is null, ignoring\n",
			     __func__);
		return;
	}

	/*
	 * One more sanity check: make sure the op is in one of the possible
	 * states, otherwise don't try to cancel it.
	 */
	if (!(op_state_waiting(op) ||
	      op_state_in_progress(op) ||
	      op_state_serviced(op) ||
	      op_state_purged(op))) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op %p not in a valid state (%0x), "
			     "ignoring\n",
			     __func__,
			     op,
			     op->op_state);
		return;
	}

	spin_lock(&op->lock);
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in-progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
}

/*
 * Sleeps on the op's waitqueue waiting for the matching downcall.
 * If the client-core finishes servicing the op, we are good to go;
 * else, if the client-core exits, we get woken up here and retry with a
 * timeout.
 *
 * Postcondition: when this call returns to the caller, the specified op
 * will no longer be on any list or htable.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 *	EAGAIN in case we want the caller to requeue and try again.
 *	EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 *	operation, either because the client-core seems to be exiting too
 *	often or because we were interrupted.
 */
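/*
 * Outline of one pass through the loop below:
 *	- return 0 as soon as the op is marked serviced;
 *	- bail out with -EINTR if a signal is pending;
 *	- on the first attempt (if not purged), sleep with no timeout; on
 *	  later attempts, sleep with an op_timeout_secs timeout and give up
 *	  with -ETIMEDOUT if it expires;
 *	- if the op was purged by a client-core restart, return -EAGAIN so
 *	  the caller can requeue it, or -EIO once ORANGEFS_PURGE_RETRY_COUNT
 *	  attempts have been used up.
 */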
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (unlikely(signal_pending(current))) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation interrupted by a signal (tag "
				     "%llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		/*
		 * if this was our first attempt and the client-core has
		 * not purged our operation, we are happy to simply wait
		 */
		spin_lock(&op->lock);
		if (op->attempts == 0 && !op_state_purged(op)) {
			spin_unlock(&op->lock);
			schedule();
		} else {
			spin_unlock(&op->lock);
			/*
			 * on subsequent attempts, we retry exactly once
			 * with a timeout
			 */
			if (!schedule_timeout(MSECS_TO_JIFFIES
			      (1000 * op_timeout_secs))) {
				gossip_debug(GOSSIP_WAIT_DEBUG,
					     "*** %s:"
					     " operation timed out (tag"
					     " %llu, %p, att %d)\n",
					     __func__,
					     llu(op->tag),
					     op,
					     op->attempts);
				ret = -ETIMEDOUT;
				orangefs_clean_up_interrupted_operation(op);
				break;
			}
		}
		spin_lock(&op->lock);
		op->attempts++;
		/*
		 * if the operation was purged in the meantime, it is better
		 * to requeue it afresh, but ensure that we have not been
		 * purged repeatedly. This could happen if the client-core
		 * crashes while an op is being serviced, so we requeue the
		 * op, the client-core crashes again so we requeue the op,
		 * the client-core starts, and so on...
		 */
		if (op_state_purged(op)) {
			ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
				 -EAGAIN :
				 -EIO;
			spin_unlock(&op->lock);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation purged (tag "
				     "%llu, %p, att %d)\n",
				     __func__,
				     llu(op->tag),
				     op,
				     op->attempts);
			orangefs_clean_up_interrupted_operation(op);
			break;
		}
		spin_unlock(&op->lock);
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	return ret;
}

/*
 * Similar to wait_for_matching_downcall(), but used in the special case
 * of I/O cancellations.
 *
 * Note that we need a special wait function because if this is called we
 * already know that a signal is pending in current and need to service the
 * cancellation upcall anyway. The only way to exit this is to either time
 * out or have the cancellation serviced properly.
 */
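/*
 * Unlike wait_for_matching_downcall(), this waits for at most one
 * op_timeout_secs period: whatever schedule_timeout() returns, the loop
 * below exits with either 0 (serviced), -EINTR, or -ETIMEDOUT.
 */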
static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:op-state is SERVICED.\n",
				     __func__);
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (signal_pending(current)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:operation interrupted by a signal (tag"
				     " %llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call schedule_timeout.\n",
			     __func__);
		ret =
		    schedule_timeout(MSECS_TO_JIFFIES(1000 * op_timeout_secs));

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Value returned from schedule_timeout(%d).\n",
			     __func__,
			     ret);
		if (!ret) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:*** operation timed out: %p\n",
				     __func__,
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -ETIMEDOUT;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
			     __func__);
		ret = -ETIMEDOUT;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:returning ret(%d)\n",
		     __func__,
		     ret);

	return ret;
}