// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between the device removal and mount paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

enum {
	OPT_DAX,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	{}
};

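/* Parse virtiofs-specific mount options; currently only "dax" is recognized */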
static int virtio_fs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fc->fs_private;
	int opt;

	opt = fs_parse(fc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

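/* Free the fuse_fs_context allocated for this mount attempt */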
static void virtio_fs_free_fc(struct fs_context *fc)
{
	struct fuse_fs_context *ctx = fc->fs_private;

	kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
	return &vq_to_fsvq(vq)->fud->pq;
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

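/* Free the virtio_fs instance once the last reference has been dropped */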
static void release_virtio_fs_obj(struct kref *ref)
{
	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);

	kfree(vfs->vqs);
	kfree(vfs);
}

/* Caller must hold virtio_fs_mutex */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}

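/* Drop the fuse_iqueue's reference on the virtio_fs instance */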
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

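/* Wait until a virtqueue has no in-flight requests and its work items are idle */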
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in-flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queues at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

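/* Mark all virtqueues as connected so new requests can be queued */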
static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	bool duplicate = false;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0)
			duplicate = true;
	}

	if (!duplicate)
		list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	if (duplicate)
		return -EEXIST;
	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read filesystem name from virtio config into fs->tag (devm-allocated, freed with the device) */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';
	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
						  struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

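/* Work function for request queue completion: collect completed requests and end them */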
static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

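/* Initialize common per-virtqueue state and pick work functions by queue type */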
static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strncpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
				  GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	if (!vqs || !callbacks || !names) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret)
		kfree(fs->vqs);
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
				     PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
				       pgoff_t pgoff, void *addr,
				       size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
				     pgoff_t pgoff, void *addr,
				     size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (rc < 0)
		return rc;
	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.copy_from_iter = virtio_fs_copy_from_iter,
	.copy_to_iter = virtio_fs_copy_to_iter,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

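/* Set up the DAX window: map the device's shared memory cache region and create a dax_device */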
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct range from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

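/* Probe a new virtio-fs device: read its tag, set up virtqueues and DAX, and register the instance */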
static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kref_init(&fs->refcount);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	/* TODO vq affinity */

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);

out:
	vdev->priv = NULL;
	kfree(fs);
	return ret;
}

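/* Mark all virtqueues as disconnected so no new requests are queued */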
static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

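/* Remove a virtio-fs device: stop and drain the queues, then drop the device's reference */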
static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

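/* Dequeue a FORGET from the fuse input queue and send it on the hiprio virtqueue */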
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on a local filesystems aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
					unsigned int num_pages,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len = min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

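/* Dequeue the next pending fuse request and enqueue it on a request virtqueue */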
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args));

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

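/* Fill in the superblock for a virtiofs mount: allocate per-queue fuse devices and start the queues */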
| 1293 | static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc) |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1294 | { |
Max Reitz | fcee216 | 2020-05-06 17:44:12 +0200 | [diff] [blame] | 1295 | struct fuse_mount *fm = get_fuse_mount_super(sb); |
| 1296 | struct fuse_conn *fc = fm->fc; |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1297 | struct virtio_fs *fs = fc->iq.priv; |
Vivek Goyal | 1dd5395 | 2020-08-19 18:19:47 -0400 | [diff] [blame] | 1298 | struct fuse_fs_context *ctx = fsc->fs_private; |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1299 | unsigned int i; |
| 1300 | int err; |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1301 | |
Vivek Goyal | 1dd5395 | 2020-08-19 18:19:47 -0400 | [diff] [blame] | 1302 | virtio_fs_ctx_set_defaults(ctx); |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1303 | mutex_lock(&virtio_fs_mutex); |
| 1304 | |
| 1305 | /* After holding mutex, make sure virtiofs device is still there. |
| 1306 | * Though we are holding a reference to it, drive ->remove might |
| 1307 | * still have cleaned up virtual queues. In that case bail out. |
| 1308 | */ |
| 1309 | err = -EINVAL; |
| 1310 | if (list_empty(&fs->list)) { |
| 1311 | pr_info("virtio-fs: tag <%s> not found\n", fs->tag); |
| 1312 | goto err; |
| 1313 | } |
| 1314 | |
| 1315 | err = -ENOMEM; |
| 1316 | /* Allocate fuse_dev for hiprio and notification queues */ |
Vivek Goyal | 7fd3abf | 2020-05-04 14:33:15 -0400 | [diff] [blame] | 1317 | for (i = 0; i < fs->nvqs; i++) { |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1318 | struct virtio_fs_vq *fsvq = &fs->vqs[i]; |
| 1319 | |
| 1320 | fsvq->fud = fuse_dev_alloc(); |
| 1321 | if (!fsvq->fud) |
| 1322 | goto err_free_fuse_devs; |
| 1323 | } |
| 1324 | |
Vivek Goyal | 7fd3abf | 2020-05-04 14:33:15 -0400 | [diff] [blame] | 1325 | /* virtiofs allocates and installs its own fuse devices */ |
Vivek Goyal | 1dd5395 | 2020-08-19 18:19:47 -0400 | [diff] [blame] | 1326 | ctx->fudptr = NULL; |
| 1327 | if (ctx->dax) |
| 1328 | ctx->dax_dev = fs->dax_dev; |
| 1329 | err = fuse_fill_super_common(sb, ctx); |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1330 | if (err < 0) |
| 1331 | goto err_free_fuse_devs; |
| 1332 | |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1333 | for (i = 0; i < fs->nvqs; i++) { |
| 1334 | struct virtio_fs_vq *fsvq = &fs->vqs[i]; |
| 1335 | |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1336 | fuse_dev_install(fsvq->fud, fc); |
| 1337 | } |
| 1338 | |
| 1339 | /* Previous unmount will stop all queues. Start these again */ |
| 1340 | virtio_fs_start_all_queues(fs); |
Max Reitz | fcee216 | 2020-05-06 17:44:12 +0200 | [diff] [blame] | 1341 | fuse_send_init(fm); |
Stefan Hajnoczi | a62a8ef | 2018-06-12 09:41:17 +0100 | [diff] [blame] | 1342 | mutex_unlock(&virtio_fs_mutex); |
| 1343 | return 0; |
| 1344 | |
| 1345 | err_free_fuse_devs: |
| 1346 | virtio_fs_free_devs(fs); |
| 1347 | err: |
| 1348 | mutex_unlock(&virtio_fs_mutex); |
| 1349 | return err; |
| 1350 | } |
| 1351 | |
static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop the DAX worker. evict_inodes() will be called soon and will
	 * free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop the forget queue. The destroy request will be sent soon. */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues, drain
	 * them one more time and free the fuse devices. Freeing the fuse
	 * devices drops their reference on the fuse_conn, which in turn
	 * drops its reference on the virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

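/* Superblock teardown. If this was the last fuse_mount on the connection,
 * destroy the connection (and with it the virtqueue state) before killing
 * the anonymous superblock.
 */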
static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If the mount failed, we can still be called without any fuse_mount */
	if (fm) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
}

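/* sget_fc() test callback: mounts share a super_block if and only if they
 * refer to the same virtio_fs device instance, identified via the
 * fuse_iqueue private pointer.
 */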
static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

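/* Look up the virtio_fs instance named by fsc->source (the device tag),
 * allocate a fuse_conn/fuse_mount pair for it, and either create a new
 * super_block or attach to an existing one bound to the same device.
 */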
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc;
	struct fuse_mount *fm;
	int err;

	/* This gets a reference on the virtio_fs object. The pointer is
	 * installed in fc->iq.priv. When the fuse_conn goes away, it calls
	 * ->put() to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc) {
		mutex_lock(&virtio_fs_mutex);
		virtio_fs_put(fs);
		mutex_unlock(&virtio_fs_mutex);
		return -ENOMEM;
	}

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm) {
		mutex_lock(&virtio_fs_mutex);
		virtio_fs_put(fs);
		mutex_unlock(&virtio_fs_mutex);
		kfree(fc);
		return -ENOMEM;
	}

	fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
		       &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	/* sget_fc() clears fsc->s_fs_info when it uses it for a new
	 * super_block; if it is still set, an existing super_block was
	 * reused and the extra fuse_conn and fuse_mount must be dropped.
	 */
	if (fsc->s_fs_info) {
		fuse_conn_put(fc);
		kfree(fm);
	}
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			fuse_conn_put(fc);
			kfree(fm);
			sb->s_fs_info = NULL;
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

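/* Allocate the fuse_fs_context used while mount options are parsed and hook
 * up the virtiofs fs_context operations.
 */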
static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

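/* The filesystem is mounted by device tag; for example, a device exporting
 * the tag "myfs" (an illustrative name) would typically be mounted with
 * "mount -t virtiofs myfs /mnt".
 */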
static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};

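/* Module load: register the virtio driver first and then the filesystem
 * type; if filesystem registration fails, roll back the driver registration.
 */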
static int __init virtio_fs_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		return ret;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0) {
		unregister_virtio_driver(&virtio_fs_driver);
		return ret;
	}

	return 0;
}
module_init(virtio_fs_init);

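/* Module unload: unregister in the reverse order of initialization */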
static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);