/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);

/*
 * Walk the list of operations in the request queue and mark each one as
 * purged.
 * NOTE: This is called from device close, after the client-core has
 * guaranteed that no new operations can appear on the list since the
 * client-core is about to exit.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		set_op_state_purged(op);
	}
	spin_unlock(&orangefs_request_list_lock);
}

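/*
 * __add_op_to_request_list() expects orangefs_request_list_lock to already
 * be held by the caller; it only takes op->lock itself.
 * add_op_to_request_list() below is the self-locking wrapper.
 */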
static inline void
__add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

static inline void
add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	__add_op_to_request_list(op);
	spin_unlock(&orangefs_request_list_lock);
}

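/*
 * Priority operations are placed at the head of the request list
 * (list_add() instead of list_add_tail()), so the client-core picks them
 * up before any ordinary queued operations.
 */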
static inline
void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);

	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note: op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a
 * failure to service the operation. If the caller wishes to distinguish,
 * op->state can be checked to see whether it was actually serviced.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
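/*
 * Illustrative caller sketch only, not a definitive recipe: the allocation
 * and flag helpers shown here (op_alloc(), op_release(),
 * get_interruptible_flag()) follow the pattern used by other orangefs
 * callers, and the request setup is elided.
 *
 *	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
 *	if (!new_op)
 *		return -ENOMEM;
 *	... fill in new_op->upcall.req ...
 *	ret = service_operation(new_op, __func__,
 *				get_interruptible_flag(inode));
 *	... on success, read results from new_op->downcall ...
 *	op_release(new_op);
 */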
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	/* flags to modify behavior */
	sigset_t orig_sigset;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation: %s %p\n",
		     op_name,
		     op);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: operation posted by process: %s, pid: %i\n",
		     current->comm,
		     current->pid);

	/* mask out signals if this operation is not to be interrupted */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_block_signals(&orig_sigset);

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
		ret = mutex_lock_interruptible(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the request mutex
		 */
		if (ret < 0) {
			if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
				orangefs_set_signals(&orig_sigset);
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "orangefs: service_operation interrupted.\n");
			return ret;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:About to call is_daemon_in_service().\n",
		     __func__);

	if (is_daemon_in_service() < 0) {
		/*
		 * By incrementing the per-operation attempt counter, we
		 * directly go into the timeout logic while waiting for
		 * the matching downcall to be read
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service(%d).\n",
			     __func__,
			     is_daemon_in_service());
		op->attempts++;
	}

	/* queue up the operation */
	if (flags & ORANGEFS_OP_PRIORITY) {
		add_priority_op_to_request_list(op);
	} else {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call add_op_to_request_list().\n",
			     __func__);
		add_op_to_request_list(op);
	}

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
		mutex_unlock(&request_mutex);

	/*
	 * If we are asked to service an asynchronous operation from the
	 * VFS perspective, we are done.
	 */
	if (flags & ORANGEFS_OP_ASYNC)
		return 0;

	ret = wait_for_matching_downcall(op);

	if (ret < 0) {
		/* failed to get matching downcall */
		if (ret == -ETIMEDOUT) {
			gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
				   op_name);
		}
		op->downcall.status = ret;
	} else {
		/* got matching downcall; make sure status is in errno format */
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
	}

	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_set_signals(&orig_sigset);

	BUG_ON(ret != op->downcall.status);
	/* retry if the operation has not been serviced and a retry is requested */
	if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s)"
			     " -- operation to be retried (%d attempt)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts + 1);

		if (!op->uses_shared_memory)
			/*
			 * this operation doesn't use the shared memory
			 * system
			 */
			goto retry_servicing;

		/* op uses shared memory */
		if (orangefs_get_bufmap_init() == 0) {
			WARN_ON(1);
			/*
			 * This operation uses the shared memory system AND
			 * the system is not yet ready. This situation occurs
			 * when the client-core is restarted AND there were
			 * operations waiting to be processed or already in
			 * progress.
			 */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "uses_shared_memory is true.\n");
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Client core in-service status(%d).\n",
				     is_daemon_in_service());
			gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
				     orangefs_get_bufmap_init());
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "operation's status is 0x%0x.\n",
				     op->op_state);

			/*
			 * let the process sleep for a few seconds so the
			 * shared memory system can be initialized.
			 */
			prepare_to_wait(&orangefs_bufmap_init_waitq,
					&wait_entry,
					TASK_INTERRUPTIBLE);

			/*
			 * Wait for orangefs_bufmap_initialize() to wake me up
			 * within the allotted time.
			 */
			ret = schedule_timeout(
				ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ);

			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Value returned from schedule_timeout: %d.\n",
				     ret);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Is shared memory available? (%d).\n",
				     orangefs_get_bufmap_init());

			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);

			if (orangefs_get_bufmap_init() == 0) {
				gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
					   __func__,
					   ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
					   get_opname_string(op));
				return -EIO;
			}

			/*
			 * Return to the calling function and re-populate a
			 * shared memory buffer.
			 */
			return -EAGAIN;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}

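/*
 * Reuse an in-progress I/O op to send an ORANGEFS_VFS_OP_CANCEL upcall for
 * its original tag: remember the bufmap slot that must be freed, reset the
 * upcall/downcall areas, and queue the cancel on the request list. Returns
 * false if the op was not in progress or the client-core is not running.
 */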
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;

	if (!op_state_in_progress(op))
		return false;

	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	__add_op_to_request_list(op);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_UTILS_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}

static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * Handle interrupted cases depending on what state we were in when
	 * the interruption was detected. There is a coarse-grained lock
	 * across the operation.
	 *
	 * Called with op->lock held.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in-progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
}

/*
 * Sleeps on the op's waitqueue waiting for a matching downcall.
 * If client-core finishes servicing it, then we are good to go.
 * Otherwise, if client-core exits, we get woken up here and retry with a
 * timeout.
 *
 * Postcondition: when this call returns to the caller, the specified op
 * will no longer be on any list or htable.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation, since client-core seems to be exiting too often
 * or because we were interrupted.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}

		if (unlikely(signal_pending(current))) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation interrupted by a signal (tag "
				     "%llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		/*
		 * if this was our first attempt and client-core
		 * has not purged our operation, we are happy to
		 * simply wait
		 */
		if (op->attempts == 0 && !op_state_purged(op)) {
			spin_unlock(&op->lock);
			schedule();
		} else {
			spin_unlock(&op->lock);
			/*
			 * on subsequent attempts, we retry exactly once
			 * with a timeout
			 */
			if (!schedule_timeout(op_timeout_secs * HZ)) {
				gossip_debug(GOSSIP_WAIT_DEBUG,
					     "*** %s:"
					     " operation timed out (tag"
					     " %llu, %p, att %d)\n",
					     __func__,
					     llu(op->tag),
					     op,
					     op->attempts);
				ret = -ETIMEDOUT;
				spin_lock(&op->lock);
				orangefs_clean_up_interrupted_operation(op);
				break;
			}
		}
		spin_lock(&op->lock);
		op->attempts++;
		/*
		 * if the operation was purged in the meantime, it
		 * is better to requeue it afresh but ensure that
		 * we have not been purged repeatedly. This could
		 * happen if client-core crashes when an op is being
		 * serviced, so we requeue the op, client-core crashes
		 * again so we requeue the op, client-core starts, and
		 * so on...
		 */
		if (op_state_purged(op)) {
			ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
			       -EAGAIN :
			       -EIO;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "*** %s:"
				     " operation purged (tag "
				     "%llu, %p, att %d)\n",
				     __func__,
				     llu(op->tag),
				     op,
				     op->attempts);
			orangefs_clean_up_interrupted_operation(op);
			break;
		}
		spin_unlock(&op->lock);
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	return ret;
}