// SPDX-License-Identifier: MIT
/*
 * VirtualBox Guest Shared Folders support: Virtual File System.
 *
 * Module initialization/finalization
 * File system registration/deregistration
 * Superblock reading
 * A few utility functions
 *
 * Copyright (C) 2006-2018 Oracle Corporation
 */

#include <linux/idr.h>
#include <linux/fs_parser.h>
#include <linux/magic.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/statfs.h>
#include <linux/vbox_utils.h>
#include "vfsmod.h"

#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */

#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')

static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
MODULE_PARM_DESC(follow_symlinks,
                 "Let host resolve symlinks rather than showing them");

static DEFINE_IDA(vboxsf_bdi_ida);
static DEFINE_MUTEX(vboxsf_setup_mutex);
static bool vboxsf_setup_done;
static struct super_operations vboxsf_super_ops; /* forward declaration */
static struct kmem_cache *vboxsf_inode_cachep;

static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;

enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
       opt_dmask, opt_fmask };

static const struct fs_parameter_spec vboxsf_fs_parameters[] = {
        fsparam_string  ("nls",         opt_nls),
        fsparam_u32     ("uid",         opt_uid),
        fsparam_u32     ("gid",         opt_gid),
        fsparam_u32     ("ttl",         opt_ttl),
        fsparam_u32oct  ("dmode",       opt_dmode),
        fsparam_u32oct  ("fmode",       opt_fmode),
        fsparam_u32oct  ("dmask",       opt_dmask),
        fsparam_u32oct  ("fmask",       opt_fmask),
        {}
};

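/*
 * These options are normally supplied from userspace via mount(8); the
 * share name and mount point below are hypothetical examples:
 *
 *   mount -t vboxsf -o nls=utf8,uid=1000,gid=1000,dmode=0755,fmode=0644 \
 *         myshare /mnt/shared
 *
 * Each individual option is handled by vboxsf_parse_param() below.
 */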
static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct vboxsf_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        kuid_t uid;
        kgid_t gid;
        int opt;

        opt = fs_parse(fc, vboxsf_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case opt_nls:
                if (ctx->nls_name || fc->purpose != FS_CONTEXT_FOR_MOUNT) {
                        vbg_err("vboxsf: Cannot reconfigure nls option\n");
                        return -EINVAL;
                }
                ctx->nls_name = param->string;
                param->string = NULL;
                break;
        case opt_uid:
                uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(uid))
                        return -EINVAL;
                ctx->o.uid = uid;
                break;
        case opt_gid:
                gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(gid))
                        return -EINVAL;
                ctx->o.gid = gid;
                break;
        case opt_ttl:
                ctx->o.ttl = msecs_to_jiffies(result.uint_32);
                break;
        case opt_dmode:
                if (result.uint_32 & ~0777)
                        return -EINVAL;
                ctx->o.dmode = result.uint_32;
                ctx->o.dmode_set = true;
                break;
        case opt_fmode:
                if (result.uint_32 & ~0777)
                        return -EINVAL;
                ctx->o.fmode = result.uint_32;
                ctx->o.fmode_set = true;
                break;
        case opt_dmask:
                if (result.uint_32 & ~07777)
                        return -EINVAL;
                ctx->o.dmask = result.uint_32;
                break;
        case opt_fmask:
                if (result.uint_32 & ~07777)
                        return -EINVAL;
                ctx->o.fmask = result.uint_32;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

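/*
 * Fill a new superblock: allocate the per-sb info, load the requested nls
 * table (unless utf8), set up a backing-dev-info with a unique id, ask the
 * host to map the shared folder named by fc->source, stat its root path
 * and finally instantiate the root inode and dentry.
 */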
static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct vboxsf_fs_context *ctx = fc->fs_private;
        struct shfl_string *folder_name, root_path;
        struct vboxsf_sbi *sbi;
        struct dentry *droot;
        struct inode *iroot;
        char *nls_name;
        size_t size;
        int err;

        if (!fc->source)
                return -EINVAL;

        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        sbi->o = ctx->o;
        idr_init(&sbi->ino_idr);
        spin_lock_init(&sbi->ino_idr_lock);
        sbi->next_generation = 1;
        sbi->bdi_id = -1;

        /* Load nls if not utf8 */
        nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
        if (strcmp(nls_name, "utf8") != 0) {
                if (nls_name == vboxsf_default_nls)
                        sbi->nls = load_nls_default();
                else
                        sbi->nls = load_nls(nls_name);

                if (!sbi->nls) {
                        vbg_err("vboxsf: Could not load '%s' nls\n", nls_name);
                        err = -EINVAL;
                        goto fail_free;
                }
        }

        sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
        if (sbi->bdi_id < 0) {
                err = sbi->bdi_id;
                goto fail_free;
        }

        err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
        if (err)
                goto fail_free;

        /* Turn source into a shfl_string and map the folder */
        size = strlen(fc->source) + 1;
        folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
        if (!folder_name) {
                err = -ENOMEM;
                goto fail_free;
        }
        folder_name->size = size;
        folder_name->length = size - 1;
        strlcpy(folder_name->string.utf8, fc->source, size);
        err = vboxsf_map_folder(folder_name, &sbi->root);
        kfree(folder_name);
        if (err) {
                vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
                        fc->source, err);
                goto fail_free;
        }

        root_path.length = 1;
        root_path.size = 2;
        root_path.string.utf8[0] = '/';
        root_path.string.utf8[1] = 0;
        err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
        if (err)
                goto fail_unmap;

        sb->s_magic = VBOXSF_SUPER_MAGIC;
        sb->s_blocksize = 1024;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_op = &vboxsf_super_ops;
        sb->s_d_op = &vboxsf_dentry_ops;

        iroot = iget_locked(sb, 0);
        if (!iroot) {
                err = -ENOMEM;
                goto fail_unmap;
        }
        vboxsf_init_inode(sbi, iroot, &sbi->root_info);
        unlock_new_inode(iroot);

        droot = d_make_root(iroot);
        if (!droot) {
                err = -ENOMEM;
                goto fail_unmap;
        }

        sb->s_root = droot;
        sb->s_fs_info = sbi;
        return 0;

fail_unmap:
        vboxsf_unmap_folder(sbi->root);
fail_free:
        if (sbi->bdi_id >= 0)
                ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
        if (sbi->nls)
                unload_nls(sbi->nls);
        idr_destroy(&sbi->ino_idr);
        kfree(sbi);
        return err;
}

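/*
 * Slab-constructor pattern: vboxsf_inode_init_once() runs only when a new
 * object is added to the cache, so it initializes the fields that survive
 * free/alloc cycles (the mutex and the embedded VFS inode), while
 * vboxsf_alloc_inode() resets the per-use fields on every allocation.
 */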
static void vboxsf_inode_init_once(void *data)
{
        struct vboxsf_inode *sf_i = data;

        mutex_init(&sf_i->handle_list_mutex);
        inode_init_once(&sf_i->vfs_inode);
}

static struct inode *vboxsf_alloc_inode(struct super_block *sb)
{
        struct vboxsf_inode *sf_i;

        sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
        if (!sf_i)
                return NULL;

        sf_i->force_restat = 0;
        INIT_LIST_HEAD(&sf_i->handle_list);

        return &sf_i->vfs_inode;
}

static void vboxsf_free_inode(struct inode *inode)
{
        struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
        unsigned long flags;

        spin_lock_irqsave(&sbi->ino_idr_lock, flags);
        idr_remove(&sbi->ino_idr, inode->i_ino);
        spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
        kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
}

static void vboxsf_put_super(struct super_block *sb)
{
        struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);

        vboxsf_unmap_folder(sbi->root);
        if (sbi->bdi_id >= 0)
                ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
        if (sbi->nls)
                unload_nls(sbi->nls);

        /*
         * vboxsf_free_inode uses the idr, make sure all delayed rcu free
         * inodes are flushed.
         */
        rcu_barrier();
        idr_destroy(&sbi->ino_idr);
        kfree(sbi);
}

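/*
 * The host reports volume sizes in bytes; statfs expects block counts, so
 * the byte values are divided by the allocation-unit size reported by the
 * host before being returned.
 */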
static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
{
        struct super_block *sb = dentry->d_sb;
        struct shfl_volinfo shfl_volinfo;
        struct vboxsf_sbi *sbi;
        u32 buf_len;
        int err;

        sbi = VBOXSF_SBI(sb);
        buf_len = sizeof(shfl_volinfo);
        err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
                            &buf_len, &shfl_volinfo);
        if (err)
                return err;

        stat->f_type = VBOXSF_SUPER_MAGIC;
        stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;

        do_div(shfl_volinfo.total_allocation_bytes,
               shfl_volinfo.bytes_per_allocation_unit);
        stat->f_blocks = shfl_volinfo.total_allocation_bytes;

        do_div(shfl_volinfo.available_allocation_bytes,
               shfl_volinfo.bytes_per_allocation_unit);
        stat->f_bfree  = shfl_volinfo.available_allocation_bytes;
        stat->f_bavail = shfl_volinfo.available_allocation_bytes;

        stat->f_files = 1000;
        /*
         * Don't return 0 here since the guest may then think that it is not
         * possible to create any more files.
         */
        stat->f_ffree = 1000000;
        stat->f_fsid.val[0] = 0;
        stat->f_fsid.val[1] = 0;
        stat->f_namelen = 255;
        return 0;
}

static struct super_operations vboxsf_super_ops = {
        .alloc_inode    = vboxsf_alloc_inode,
        .free_inode     = vboxsf_free_inode,
        .put_super      = vboxsf_put_super,
        .statfs         = vboxsf_statfs,
};

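/*
 * One-time lazy setup, done on the first mount rather than at module load:
 * create the inode cache, connect to the vboxguest PCI device, tell the
 * host the guest uses utf8 and (unless follow_symlinks is set) ask it to
 * report symlinks as such. Serialized by vboxsf_setup_mutex.
 */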
static int vboxsf_setup(void)
{
        int err;

        mutex_lock(&vboxsf_setup_mutex);

        if (vboxsf_setup_done)
                goto success;

        vboxsf_inode_cachep =
                kmem_cache_create("vboxsf_inode_cache",
                                  sizeof(struct vboxsf_inode), 0,
                                  (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
                                   SLAB_ACCOUNT),
                                  vboxsf_inode_init_once);
        if (!vboxsf_inode_cachep) {
                err = -ENOMEM;
                goto fail_nomem;
        }

        err = vboxsf_connect();
        if (err) {
                vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
                vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
                vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
                goto fail_free_cache;
        }

        err = vboxsf_set_utf8();
        if (err) {
                vbg_err("vboxsf_setutf8 error %d\n", err);
                goto fail_disconnect;
        }

        if (!follow_symlinks) {
                err = vboxsf_set_symlinks();
                if (err)
                        vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
        }

        vboxsf_setup_done = true;
success:
        mutex_unlock(&vboxsf_setup_mutex);
        return 0;

fail_disconnect:
        vboxsf_disconnect();
fail_free_cache:
        kmem_cache_destroy(vboxsf_inode_cachep);
fail_nomem:
        mutex_unlock(&vboxsf_setup_mutex);
        return err;
}

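/*
 * Older userspace (the obsolete mount.vboxsf helper) passed a binary
 * options struct instead of a text option string; such data starts with
 * the four signature bytes defined above and is rejected here before the
 * remaining options are parsed as normal text.
 */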
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
        char *options = data;

        if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
            options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
            options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
            options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
                vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
                return -EINVAL;
        }

        return generic_parse_monolithic(fc, data);
}

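/*
 * The first mount triggers the one-time host setup; each mount then gets
 * its own anonymous (nodev) superblock via get_tree_nodev().
 */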
static int vboxsf_get_tree(struct fs_context *fc)
{
        int err;

        err = vboxsf_setup();
        if (err)
                return err;

        return get_tree_nodev(fc, vboxsf_fill_super);
}

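/*
 * Remount: copy the (re)parsed options into the superblock info and
 * re-apply them to the root inode. The nls option cannot be changed on
 * remount; vboxsf_parse_param() rejects it for anything but a fresh mount.
 */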
static int vboxsf_reconfigure(struct fs_context *fc)
{
        struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
        struct vboxsf_fs_context *ctx = fc->fs_private;
        struct inode *iroot = fc->root->d_sb->s_root->d_inode;

        /* Apply changed options to the root inode */
        sbi->o = ctx->o;
        vboxsf_init_inode(sbi, iroot, &sbi->root_info);

        return 0;
}

static void vboxsf_free_fc(struct fs_context *fc)
{
        struct vboxsf_fs_context *ctx = fc->fs_private;

        kfree(ctx->nls_name);
        kfree(ctx);
}

static const struct fs_context_operations vboxsf_context_ops = {
        .free                   = vboxsf_free_fc,
        .parse_param            = vboxsf_parse_param,
        .parse_monolithic       = vboxsf_parse_monolithic,
        .get_tree               = vboxsf_get_tree,
        .reconfigure            = vboxsf_reconfigure,
};

static int vboxsf_init_fs_context(struct fs_context *fc)
{
        struct vboxsf_fs_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        current_uid_gid(&ctx->o.uid, &ctx->o.gid);

        fc->fs_private = ctx;
        fc->ops = &vboxsf_context_ops;
        return 0;
}

static struct file_system_type vboxsf_fs_type = {
        .owner = THIS_MODULE,
        .name = "vboxsf",
        .init_fs_context = vboxsf_init_fs_context,
        .kill_sb = kill_anon_super
};

/* Module initialization/finalization handlers */
static int __init vboxsf_init(void)
{
        return register_filesystem(&vboxsf_fs_type);
}

static void __exit vboxsf_fini(void)
{
        unregister_filesystem(&vboxsf_fs_type);

        mutex_lock(&vboxsf_setup_mutex);
        if (vboxsf_setup_done) {
                vboxsf_disconnect();
                /*
                 * Make sure all delayed rcu free inodes are flushed
                 * before we destroy the cache.
                 */
                rcu_barrier();
                kmem_cache_destroy(vboxsf_inode_cachep);
        }
        mutex_unlock(&vboxsf_setup_mutex);
}

module_init(vboxsf_init);
module_exit(vboxsf_fini);

MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
MODULE_AUTHOR("Oracle Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_FS("vboxsf");