// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
 */

#include <linux/sched/signal.h>
#include "autofs_i.h"

/* We make this a static variable rather than a part of the superblock; it
 * is better if we don't reassign numbers easily even across filesystems
 */
static autofs_wqt_t autofs_next_wait_queue = 1;

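/*
 * Switch the mount into catatonic mode: mark the superblock, fail every
 * pending wait with -ENOENT and wake its sleepers, then drop the daemon
 * pipe so no further requests can be sent.
 */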
void autofs_catatonic_mode(struct autofs_sb_info *sbi)
{
	struct autofs_wait_queue *wq, *nwq;

	mutex_lock(&sbi->wq_mutex);
	if (sbi->flags & AUTOFS_SBI_CATATONIC) {
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pr_debug("entering catatonic mode\n");

	sbi->flags |= AUTOFS_SBI_CATATONIC;
	wq = sbi->queues;
	sbi->queues = NULL;	/* Erase all wait queues */
	while (wq) {
		nwq = wq->next;
		wq->status = -ENOENT; /* Magic is gone - report failure */
		kfree(wq->name.name - wq->offset);
		wq->name.name = NULL;
		wq->wait_ctr--;
		wake_up_interruptible(&wq->queue);
		wq = nwq;
	}
	fput(sbi->pipe);	/* Close the pipe */
	sbi->pipe = NULL;
	sbi->pipefd = -1;
	mutex_unlock(&sbi->wq_mutex);
}

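/*
 * Write a packet to the daemon's pipe under pipe_mutex, retrying short
 * writes. A SIGPIPE raised by the write is cleared again unless the task
 * already had one pending, so a dead daemon doesn't kill the caller.
 */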
static int autofs_write(struct autofs_sb_info *sbi,
			struct file *file, const void *addr, int bytes)
{
	unsigned long sigpipe, flags;
	const char *data = (const char *)addr;
	ssize_t wr = 0;

	sigpipe = sigismember(&current->pending.signal, SIGPIPE);

	mutex_lock(&sbi->pipe_mutex);
	while (bytes) {
		wr = __kernel_write(file, data, bytes, NULL);
		if (wr <= 0)
			break;
		data += wr;
		bytes -= wr;
	}
	mutex_unlock(&sbi->pipe_mutex);

	/* Keep the currently executing process from receiving a
	 * SIGPIPE unless it was already supposed to get one
	 */
	if (wr == -EPIPE && !sigpipe) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->pending.signal, SIGPIPE);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}

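/*
 * Build a v4 or v5 packet for this wait and send it to the daemon.
 * Called with wq_mutex held; the mutex is dropped before writing to the
 * pipe. On most write failures the mount is made catatonic.
 */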
static void autofs_notify_daemon(struct autofs_sb_info *sbi,
				 struct autofs_wait_queue *wq,
				 int type)
{
	union {
		struct autofs_packet_hdr hdr;
		union autofs_packet_union v4_pkt;
		union autofs_v5_packet_union v5_pkt;
	} pkt;
	struct file *pipe = NULL;
	size_t pktsz;
	int ret;

	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
		 (unsigned long) wq->wait_queue_token,
		 wq->name.len, wq->name.name, type);

	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = type;

	switch (type) {
	/* Kernel protocol v4 missing and expire packets */
	case autofs_ptype_missing:
	{
		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;

		pktsz = sizeof(*mp);

		mp->wait_queue_token = wq->wait_queue_token;
		mp->len = wq->name.len;
		memcpy(mp->name, wq->name.name, wq->name.len);
		mp->name[wq->name.len] = '\0';
		break;
	}
	case autofs_ptype_expire_multi:
	{
		struct autofs_packet_expire_multi *ep =
			&pkt.v4_pkt.expire_multi;

		pktsz = sizeof(*ep);

		ep->wait_queue_token = wq->wait_queue_token;
		ep->len = wq->name.len;
		memcpy(ep->name, wq->name.name, wq->name.len);
		ep->name[wq->name.len] = '\0';
		break;
	}
	/*
	 * Kernel protocol v5 packet for handling indirect and direct
	 * mount missing and expire requests
	 */
	case autofs_ptype_missing_indirect:
	case autofs_ptype_expire_indirect:
	case autofs_ptype_missing_direct:
	case autofs_ptype_expire_direct:
	{
		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
		struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;

		pktsz = sizeof(*packet);

		packet->wait_queue_token = wq->wait_queue_token;
		packet->len = wq->name.len;
		memcpy(packet->name, wq->name.name, wq->name.len);
		packet->name[wq->name.len] = '\0';
		packet->dev = wq->dev;
		packet->ino = wq->ino;
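		/*
		 * Report the ids in the user namespace of whoever opened
		 * the pipe (the daemon), taken from the pipe's f_cred.
		 */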
		packet->uid = from_kuid_munged(user_ns, wq->uid);
		packet->gid = from_kgid_munged(user_ns, wq->gid);
		packet->pid = wq->pid;
		packet->tgid = wq->tgid;
		break;
	}
	default:
		pr_warn("bad type %d!\n", type);
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

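	/*
	 * Take a reference on the pipe: once wq_mutex is dropped a
	 * catatonic shutdown could fput() and clear sbi->pipe.
	 */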
	pipe = get_file(sbi->pipe);

	mutex_unlock(&sbi->wq_mutex);

	switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
	case 0:
		break;
	case -ENOMEM:
	case -ERESTARTSYS:
		/* Just fail this one */
		autofs_wait_release(sbi, wq->wait_queue_token, ret);
		break;
	default:
		autofs_catatonic_mode(sbi);
		break;
	}
	fput(pipe);
}

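/* Find an existing wait for this name. Caller must hold wq_mutex. */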
static struct autofs_wait_queue *
autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
	struct autofs_wait_queue *wq;

	for (wq = sbi->queues; wq; wq = wq->next) {
		if (wq->name.hash == qstr->hash &&
		    wq->name.len == qstr->len &&
		    wq->name.name &&
		    !memcmp(wq->name.name, qstr->name, qstr->len))
			break;
	}
	return wq;
}

/*
 * Check if we have a valid request.
 * Returns
 * 1 if the request should continue.
 *   In this case we can return an autofs_wait_queue entry if one is
 *   found or NULL to indicate a new wait needs to be created.
 * 0 or a negative errno if the request shouldn't continue.
 */
static int validate_request(struct autofs_wait_queue **wait,
			    struct autofs_sb_info *sbi,
			    const struct qstr *qstr,
			    const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct autofs_info *ino;

	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/* Wait in progress, continue; */
	wq = autofs_find_wait(sbi, qstr);
	if (wq) {
		*wait = wq;
		return 1;
	}

	*wait = NULL;

	/* If we don't yet have any info this is a new request */
	ino = autofs_dentry_ino(dentry);
	if (!ino)
		return 1;

	/*
	 * If we've been asked to wait on an existing expire (NFY_NONE)
	 * but there is no wait in the queue ...
	 */
	if (notify == NFY_NONE) {
		/*
		 * Either we've beaten the pending expire to posting its
		 * wait or it finished while we waited on the mutex.
		 * So we need to wait until either the wait appears or
		 * the expire finishes.
		 */

		while (ino->flags & AUTOFS_INF_EXPIRING) {
			mutex_unlock(&sbi->wq_mutex);
			schedule_timeout_interruptible(HZ/10);
			if (mutex_lock_interruptible(&sbi->wq_mutex))
				return -EINTR;

			if (sbi->flags & AUTOFS_SBI_CATATONIC)
				return -ENOENT;

			wq = autofs_find_wait(sbi, qstr);
			if (wq) {
				*wait = wq;
				return 1;
			}
		}

		/*
		 * Not ideal but the status has already gone. Of the two
		 * cases where we wait on NFY_NONE, neither depends on
		 * the return status of the wait.
		 */
		return 0;
	}

	/*
	 * If we've been asked to trigger a mount and the request
	 * completed while we waited on the mutex ...
	 */
	if (notify == NFY_MOUNT) {
		struct dentry *new = NULL;
		struct path this;
		int valid = 1;

		/*
		 * If the dentry was successfully mounted while we slept
		 * on the wait queue mutex we can return success. If it
		 * isn't mounted (doesn't have submounts for the case of
		 * a multi-mount with no mount at its base) we can
		 * continue on and create a new request.
		 */
		if (!IS_ROOT(dentry)) {
			if (d_unhashed(dentry) &&
			    d_really_is_positive(dentry)) {
				struct dentry *parent = dentry->d_parent;

				new = d_lookup(parent, &dentry->d_name);
				if (new)
					dentry = new;
			}
		}
		this.mnt = path->mnt;
		this.dentry = dentry;
		if (path_has_submounts(&this))
			valid = 0;

		if (new)
			dput(new);
		return valid;
	}

	return 1;
}

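/*
 * Wait for the daemon to service a missing or expire request for the
 * given path. An existing wait for the same name is joined, otherwise a
 * new wait queue entry is created and the daemon is notified; the return
 * value is the status reported by the daemon (or a catatonic shutdown).
 */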
int autofs_wait(struct autofs_sb_info *sbi,
		const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;
	unsigned int offset = 0;
	pid_t pid;
	pid_t tgid;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/*
	 * Try translating pids to the namespace of the daemon.
	 *
	 * Zero means failure: we are in an unrelated pid namespace.
	 */
	pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	if (pid == 0 || tgid == 0)
		return -ENOENT;

	if (d_really_is_negative(dentry)) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) {
		qstr.name = name;
		qstr.len = sprintf(name, "%p", dentry);
	} else {
		char *p = dentry_path_raw(dentry, name, NAME_MAX);

		if (IS_ERR(p)) {
			kfree(name);
			return -ENOENT;
		}
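		/*
		 * The name starts partway into the kmalloc()'d buffer;
		 * remember the offset so the whole buffer can be freed
		 * later via kfree(wq->name.name - wq->offset).
		 */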
		qstr.name = ++p; // skip the leading slash
		qstr.len = strlen(p);
		offset = p - name;
	}
	qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, path, notify);
	if (ret <= 0) {
		if (ret != -EINTR)
			mutex_unlock(&sbi->wq_mutex);
		kfree(name);
		return ret;
	}

	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if (!wq) {
			kfree(name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

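		/* Hand out the next token, skipping 0 on wrap-around */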
		wq->wait_queue_token = autofs_next_wait_queue;
		if (++autofs_next_wait_queue == 0)
			autofs_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->offset = offset;
		wq->dev = autofs_get_dev(sbi);
		wq->ino = autofs_get_ino(sbi);
		wq->uid = current_uid();
		wq->gid = current_gid();
		wq->pid = pid;
		wq->tgid = tgid;
		wq->status = -EINTR; /* Status return if interrupted */
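		/* Two refs: this waiter plus the eventual release path */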
		wq->wait_ctr = 2;

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);

		/*
		 * autofs_notify_daemon() may block; it will unlock ->wq_mutex
		 */
		autofs_notify_daemon(sbi, wq, type);
	} else {
		wq->wait_ctr++;
		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);
		mutex_unlock(&sbi->wq_mutex);
		kfree(name);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	wait_event_killable(wq->queue, wq->name.name == NULL);
	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs_dentry_ino(dentry);
		if (!ino) {
			/* If not, look up the actual dentry used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}

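/*
 * Complete the wait identified by wait_queue_token with the given status:
 * unlink it from the queue list, wake any waiters and drop this side's
 * reference. Called when the daemon answers a request, or from the notify
 * path when the pipe write fails.
 */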
int autofs_wait_release(struct autofs_sb_info *sbi,
			autofs_wqt_t wait_queue_token, int status)
{
	struct autofs_wait_queue *wq, **wql;

	mutex_lock(&sbi->wq_mutex);
	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
		if (wq->wait_queue_token == wait_queue_token)
			break;
	}

	if (!wq) {
		mutex_unlock(&sbi->wq_mutex);
		return -EINVAL;
	}

	*wql = wq->next;	/* Unlink from chain */
	kfree(wq->name.name - wq->offset);
	wq->name.name = NULL;	/* Do not wait on this queue */
	wq->status = status;
	wake_up(&wq->queue);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return 0;
}