/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

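/* Reset a request to a zeroed state holding a single reference */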
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

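/* Allocate a request from the slab cache and initialize it */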
struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

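/* Block all signals except SIGKILL, saving the old mask in oldset */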
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset a request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it holds the
 * only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
}

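/* Take an additional reference on the request */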
static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

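/*
 * Reserve a request for this connection: wait (killable) while the
 * connection is blocked, allocate and initialize the request, bump
 * the waiting count and fill in the caller's credentials
 */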
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
        struct fuse_req *req;
        sigset_t oldset;
        int err;

        block_sigs(&oldset);
        err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
        if (err)
                return ERR_PTR(-EINTR);

        req = fuse_request_alloc();
        if (!req)
                return ERR_PTR(-ENOMEM);

        atomic_inc(&fc->num_waiting);
        fuse_request_init(req);
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

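/* Drop a reference; on the last put, decrement the waiting count and free the request */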
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                atomic_dec(&fc->num_waiting);
                fuse_request_free(req);
        }
}

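/*
 * Drop the inode and file references held by a background request and
 * take it off the background list.  If the number of background
 * requests drops back below FUSE_MAX_BACKGROUND, unblock the
 * connection and wake up waiters.
 */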
void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fc->lock);
        list_del(&req->bg_entry);
        if (fc->num_background == FUSE_MAX_BACKGROUND) {
                fc->blocked = 0;
                wake_up_all(&fc->blocked_waitq);
        }
        fc->num_background--;
        spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the
 * request is finished and it must be the sole possessor.  If a
 * request is interrupted and put in the background, it will return
 * with an error and hence never be reset and reused.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        list_del(&req->list);
        req->state = FUSE_REQ_FINISHED;
        if (!req->background) {
                spin_unlock(&fc->lock);
                wake_up(&req->waitq);
                fuse_put_request(fc, req);
        } else {
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
                req->end = NULL;
                spin_unlock(&fc->lock);
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(fc, req);
                up_read(&fc->sbput_sem);
                if (end)
                        end(fc, req);
                else
                        fuse_put_request(fc, req);
        }
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why the request needs to be
 * interrupted, so there's not a lot that can be done about this,
 * except introduce additional locking in userspace.
 *
 * More important is to keep the inode and file references until
 * userspace has replied, otherwise FORGET and RELEASE could be sent
 * while the inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        fc->num_background++;
        if (fc->num_background == FUSE_MAX_BACKGROUND)
                fc->blocked = 1;
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fc->lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fc->lock);
        if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
                return;

        if (!req->interrupted) {
                req->out.h.error = -EINTR;
                req->interrupted = 1;
        }
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

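/* Total length in bytes of an array of request arguments */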
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

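/*
 * Assign a unique ID and the total length to the request, add it to
 * the pending list and wake up readers waiting on the device
 */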
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}

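/*
 * Queue a request the caller doesn't wait for: the request is
 * backgrounded immediately, and finished with -ENOTCONN if the
 * connection is already gone
 */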
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        background_request(fc, req);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock request.  If it was interrupted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

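/* Set up a copy state for transferring request data to or from a userspace iovec */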
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write, struct fuse_req *req,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pageful of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET) or the request has been
 * interrupted or there was an error during the copying then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list, and set the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            list_empty(&fc->pending))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}

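/* Plain read() entry point: wrap the user buffer in a single-segment iovec */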
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, the request
 * is removed from the list and the rest of the buffer is copied to
 * it.  The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        if (req->interrupted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(&cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fc->lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

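/*
 * Poll: the device is always writable; it becomes readable when a
 * request is pending, and reports POLLERR once the connection is gone
 */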
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->interrupted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}

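/*
 * Called on the last close of the device file: mark the connection as
 * dead, finish pending and processing requests, remove any fasync
 * entries and drop the connection reference
 */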
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fasync_helper(-1, file, 0, &fc->fasync);
                kobject_put(&fc->kobj);
        }

        return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = fuse_dev_read,
        .readv = fuse_dev_readv,
        .write = fuse_dev_write,
        .writev = fuse_dev_writev,
        .poll = fuse_dev_poll,
        .release = fuse_dev_release,
        .fasync = fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name = "fuse",
        .fops = &fuse_dev_operations,
};

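/* Create the request slab cache and register the FUSE misc device */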
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}