// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
 */

#include <linux/sched/signal.h>
#include "autofs_i.h"

/* We make this a static variable rather than a part of the superblock; it
 * is better if we don't reassign numbers easily even across filesystems
 */
static autofs_wqt_t autofs_next_wait_queue = 1;

void autofs_catatonic_mode(struct autofs_sb_info *sbi)
{
	struct autofs_wait_queue *wq, *nwq;

	mutex_lock(&sbi->wq_mutex);
	if (sbi->flags & AUTOFS_SBI_CATATONIC) {
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pr_debug("entering catatonic mode\n");

	sbi->flags |= AUTOFS_SBI_CATATONIC;
	wq = sbi->queues;
	sbi->queues = NULL;	/* Erase all wait queues */
	while (wq) {
		nwq = wq->next;
		wq->status = -ENOENT; /* Magic is gone - report failure */
		kfree(wq->name.name);
		wq->name.name = NULL;
		wq->wait_ctr--;
		wake_up_interruptible(&wq->queue);
		wq = nwq;
	}
	fput(sbi->pipe);	/* Close the pipe */
	sbi->pipe = NULL;
	sbi->pipefd = -1;
	mutex_unlock(&sbi->wq_mutex);
}

static int autofs_write(struct autofs_sb_info *sbi,
			struct file *file, const void *addr, int bytes)
{
	unsigned long sigpipe, flags;
	const char *data = (const char *)addr;
	ssize_t wr = 0;

	sigpipe = sigismember(&current->pending.signal, SIGPIPE);

	mutex_lock(&sbi->pipe_mutex);
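	/*
	 * __kernel_write() may complete only part of the packet, so loop
	 * until every byte has been written or an error is returned.
	 */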
	while (bytes) {
		wr = __kernel_write(file, data, bytes, NULL);
		if (wr <= 0)
			break;
		data += wr;
		bytes -= wr;
	}
	mutex_unlock(&sbi->pipe_mutex);

	/* Keep the currently executing process from receiving a
	 * SIGPIPE unless it was already supposed to get one
	 */
	if (wr == -EPIPE && !sigpipe) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->pending.signal, SIGPIPE);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}

static void autofs_notify_daemon(struct autofs_sb_info *sbi,
				 struct autofs_wait_queue *wq,
				 int type)
{
	union {
		struct autofs_packet_hdr hdr;
		union autofs_packet_union v4_pkt;
		union autofs_v5_packet_union v5_pkt;
	} pkt;
	struct file *pipe = NULL;
	size_t pktsz;
	int ret;

	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
		 (unsigned long) wq->wait_queue_token,
		 wq->name.len, wq->name.name, type);

	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = type;

	switch (type) {
	/* Kernel protocol v4 missing and expire packets */
	case autofs_ptype_missing:
	{
		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;

		pktsz = sizeof(*mp);

		mp->wait_queue_token = wq->wait_queue_token;
		mp->len = wq->name.len;
		memcpy(mp->name, wq->name.name, wq->name.len);
		mp->name[wq->name.len] = '\0';
		break;
	}
	case autofs_ptype_expire_multi:
	{
		struct autofs_packet_expire_multi *ep =
					&pkt.v4_pkt.expire_multi;

		pktsz = sizeof(*ep);

		ep->wait_queue_token = wq->wait_queue_token;
		ep->len = wq->name.len;
		memcpy(ep->name, wq->name.name, wq->name.len);
		ep->name[wq->name.len] = '\0';
		break;
	}
	/*
	 * Kernel protocol v5 packet for handling indirect and direct
	 * mount missing and expire requests
	 */
	case autofs_ptype_missing_indirect:
	case autofs_ptype_expire_indirect:
	case autofs_ptype_missing_direct:
	case autofs_ptype_expire_direct:
	{
		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
		struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;

		pktsz = sizeof(*packet);

		packet->wait_queue_token = wq->wait_queue_token;
		packet->len = wq->name.len;
		memcpy(packet->name, wq->name.name, wq->name.len);
		packet->name[wq->name.len] = '\0';
		packet->dev = wq->dev;
		packet->ino = wq->ino;
		packet->uid = from_kuid_munged(user_ns, wq->uid);
		packet->gid = from_kgid_munged(user_ns, wq->gid);
		packet->pid = wq->pid;
		packet->tgid = wq->tgid;
		break;
	}
	default:
		pr_warn("bad type %d!\n", type);
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

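	/*
	 * Take our own reference to the pipe before dropping wq_mutex so
	 * that a concurrent autofs_catatonic_mode() closing sbi->pipe
	 * cannot drop the last reference while we are still writing to it.
	 */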
	pipe = get_file(sbi->pipe);

	mutex_unlock(&sbi->wq_mutex);

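	/*
	 * -ENOMEM and -ERESTARTSYS fail only this request; any other write
	 * error (typically a broken daemon pipe) makes the whole mount
	 * catatonic.
	 */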
	switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
	case 0:
		break;
	case -ENOMEM:
	case -ERESTARTSYS:
		/* Just fail this one */
		autofs_wait_release(sbi, wq->wait_queue_token, ret);
		break;
	default:
		autofs_catatonic_mode(sbi);
		break;
	}
	fput(pipe);
}

static int autofs_getpath(struct autofs_sb_info *sbi,
			  struct dentry *dentry, char *name)
{
	struct dentry *root = sbi->sb->s_root;
	struct dentry *tmp;
	char *buf;
	char *p;
	int len;
	unsigned seq;

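	/*
	 * Build the path relative to the autofs root by walking d_parent
	 * links, filling 'name' from the end toward the front.  The
	 * rename_lock seqcount detects a concurrent rename, in which case
	 * the whole walk is retried.
	 */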
rename_retry:
	buf = name;
	len = 0;

	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	spin_lock(&sbi->fs_lock);
	for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
		len += tmp->d_name.len + 1;

	if (!len || --len > NAME_MAX) {
		spin_unlock(&sbi->fs_lock);
		rcu_read_unlock();
		if (read_seqretry(&rename_lock, seq))
			goto rename_retry;
		return 0;
	}

	*(buf + len) = '\0';
	p = buf + len - dentry->d_name.len;
	strncpy(p, dentry->d_name.name, dentry->d_name.len);

	for (tmp = dentry->d_parent; tmp != root ; tmp = tmp->d_parent) {
		*(--p) = '/';
		p -= tmp->d_name.len;
		strncpy(p, tmp->d_name.name, tmp->d_name.len);
	}
	spin_unlock(&sbi->fs_lock);
	rcu_read_unlock();
	if (read_seqretry(&rename_lock, seq))
		goto rename_retry;

	return len;
}

static struct autofs_wait_queue *
autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
	struct autofs_wait_queue *wq;

	for (wq = sbi->queues; wq; wq = wq->next) {
		if (wq->name.hash == qstr->hash &&
		    wq->name.len == qstr->len &&
		    wq->name.name &&
		    !memcmp(wq->name.name, qstr->name, qstr->len))
			break;
	}
	return wq;
}

/*
 * Check if we have a valid request.
 * Returns
 * 1 if the request should continue.
 *   In this case we can return an autofs_wait_queue entry if one is
 *   found or NULL to indicate a new wait needs to be created.
 * 0 or a negative errno if the request shouldn't continue.
 */
static int validate_request(struct autofs_wait_queue **wait,
			    struct autofs_sb_info *sbi,
			    const struct qstr *qstr,
			    const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct autofs_info *ino;

	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/* Wait in progress, continue; */
	wq = autofs_find_wait(sbi, qstr);
	if (wq) {
		*wait = wq;
		return 1;
	}

	*wait = NULL;

	/* If we don't yet have any info this is a new request */
	ino = autofs_dentry_ino(dentry);
	if (!ino)
		return 1;

	/*
	 * If we've been asked to wait on an existing expire (NFY_NONE)
	 * but there is no wait in the queue ...
	 */
	if (notify == NFY_NONE) {
		/*
		 * Either we've beaten the pending expire to posting its
		 * wait or it finished while we waited on the mutex.
		 * So we need to wait till either the wait appears or
		 * the expire finishes.
		 */

		while (ino->flags & AUTOFS_INF_EXPIRING) {
			mutex_unlock(&sbi->wq_mutex);
			schedule_timeout_interruptible(HZ/10);
			if (mutex_lock_interruptible(&sbi->wq_mutex))
				return -EINTR;

			if (sbi->flags & AUTOFS_SBI_CATATONIC)
				return -ENOENT;

			wq = autofs_find_wait(sbi, qstr);
			if (wq) {
				*wait = wq;
				return 1;
			}
		}

		/*
		 * Not ideal but the status has already gone. Of the two
		 * cases where we wait on NFY_NONE neither depends on the
		 * return status of the wait.
		 */
		return 0;
	}

	/*
	 * If we've been asked to trigger a mount and the request
	 * completed while we waited on the mutex ...
	 */
	if (notify == NFY_MOUNT) {
		struct dentry *new = NULL;
		struct path this;
		int valid = 1;

		/*
		 * If the dentry was successfully mounted while we slept
		 * on the wait queue mutex we can return success. If it
		 * isn't mounted (doesn't have submounts for the case of
		 * a multi-mount with no mount at its base) we can
		 * continue on and create a new request.
		 */
		if (!IS_ROOT(dentry)) {
			if (d_unhashed(dentry) &&
			    d_really_is_positive(dentry)) {
				struct dentry *parent = dentry->d_parent;

				new = d_lookup(parent, &dentry->d_name);
				if (new)
					dentry = new;
			}
		}
		this.mnt = path->mnt;
		this.dentry = dentry;
		if (path_has_submounts(&this))
			valid = 0;

		if (new)
			dput(new);
		return valid;
	}

	return 1;
}

int autofs_wait(struct autofs_sb_info *sbi,
		const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;
	pid_t pid;
	pid_t tgid;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/*
	 * Try translating pids to the namespace of the daemon.
	 *
	 * Zero means failure: we are in an unrelated pid namespace.
	 */
	pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	if (pid == 0 || tgid == 0)
		return -ENOENT;

	if (d_really_is_negative(dentry)) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
		qstr.len = sprintf(name, "%p", dentry);
	else {
		qstr.len = autofs_getpath(sbi, dentry, name);
		if (!qstr.len) {
			kfree(name);
			return -ENOENT;
		}
	}
	qstr.name = name;
	qstr.hash = full_name_hash(dentry, name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(qstr.name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, path, notify);
	if (ret <= 0) {
		if (ret != -EINTR)
			mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
		return ret;
	}

	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if (!wq) {
			kfree(qstr.name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

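		/*
		 * Hand out the next token; skip zero on wrap so a valid
		 * token is never 0.
		 */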
		wq->wait_queue_token = autofs_next_wait_queue;
		if (++autofs_next_wait_queue == 0)
			autofs_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->dev = autofs_get_dev(sbi);
		wq->ino = autofs_get_ino(sbi);
		wq->uid = current_uid();
		wq->gid = current_gid();
		wq->pid = pid;
		wq->tgid = tgid;
		wq->status = -EINTR; /* Status return if interrupted */
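		/*
		 * Two references: one for this waiter and one dropped by
		 * autofs_wait_release() or by catatonic mode.
		 */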
		wq->wait_ctr = 2;

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);

		/*
		 * autofs_notify_daemon() may block; it will unlock ->wq_mutex
		 */
		autofs_notify_daemon(sbi, wq, type);
	} else {
		wq->wait_ctr++;
		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);
		mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	wait_event_killable(wq->queue, wq->name.name == NULL);
	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs_dentry_ino(dentry);
		if (!ino) {
			/* If not, look up the dentry actually used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}

int autofs_wait_release(struct autofs_sb_info *sbi,
			autofs_wqt_t wait_queue_token, int status)
{
	struct autofs_wait_queue *wq, **wql;

	mutex_lock(&sbi->wq_mutex);
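	/*
	 * Walk the queue with a pointer to the link field so the matching
	 * entry can be unlinked without special-casing the list head.
	 */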
	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
		if (wq->wait_queue_token == wait_queue_token)
			break;
	}

	if (!wq) {
		mutex_unlock(&sbi->wq_mutex);
		return -EINVAL;
	}

	*wql = wq->next;	/* Unlink from chain */
	kfree(wq->name.name);
	wq->name.name = NULL;	/* Do not wait on this queue */
	wq->status = status;
	wake_up(&wq->queue);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return 0;
}