// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 *
 * terminology
 *
 * cluster - allocation unit     - 512,1K,2K,4K,...,2M
 * vcn - virtual cluster number  - Offset inside the file in clusters.
 * vbo - virtual byte offset     - Offset inside the file in bytes.
 * lcn - logical cluster number  - 0 based cluster in clusters heap.
 * lbo - logical byte offset     - Absolute position inside volume.
 * run - maps VCN to LCN         - Stored in attributes in packed form.
 * attr - attribute segment      - std/name/data etc. records inside MFT.
 * mi - MFT inode                - One MFT record (usually 1024 bytes or 4K), consists of attributes.
 * ni - NTFS inode               - Extends the Linux inode; consists of one or more MFT inodes.
 * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
 *
 * WSL - Windows Subsystem for Linux
 * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
 * It stores uid/gid/mode/dev in xattr
 *
 */
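/*
 * Illustrative example of the terms above (not part of the driver logic):
 * with a 4K cluster (cluster_bits == 12), a file offset vbo maps to
 * vcn = vbo >> 12, the run resolves that vcn to some lcn, and the absolute
 * volume position is lbo = ((u64)lcn << 12) + (vbo & (4096 - 1)).
 */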

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
#ifdef CONFIG_NTFS3_LZX_XPRESS
#include "lib/lib.h"
#endif

#ifdef CONFIG_PRINTK
/*
 * ntfs_printk - Trace warnings/notices/errors.
 *
 * Thanks Joe Perches <joe@perches.com> for implementation
 */
void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;
        int level;
        struct ntfs_sb_info *sbi = sb->s_fs_info;

        /* Should we use different ratelimits for warnings/notices/errors? */
        if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
                return;

        va_start(args, fmt);

        level = printk_get_level(fmt);
        vaf.fmt = printk_skip_level(fmt);
        vaf.va = &args;
        printk("%c%cntfs3: %s: %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf);

        va_end(args);
}

static char s_name_buf[512];
static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'.

/*
 * ntfs_inode_printk
 *
 * Print warnings/notices/errors about inode using name or inode number.
 */
void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
{
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        char *name;
        va_list args;
        struct va_format vaf;
        int level;

        if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
                return;

        /* Use the statically allocated buffer, if possible. */
        name = atomic_dec_and_test(&s_name_buf_cnt)
                       ? s_name_buf
                       : kmalloc(sizeof(s_name_buf), GFP_NOFS);

        if (name) {
                struct dentry *de = d_find_alias(inode);
                const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;

                if (de) {
                        spin_lock(&de->d_lock);
                        snprintf(name, name_len, " \"%s\"", de->d_name.name);
                        spin_unlock(&de->d_lock);
                        name[name_len] = 0; /* To be sure. */
                } else {
                        name[0] = 0;
                }
                dput(de); /* Cocci warns if placed in branch "if (de)" */
        }

        va_start(args, fmt);

        level = printk_get_level(fmt);
        vaf.fmt = printk_skip_level(fmt);
        vaf.va = &args;

        printk("%c%cntfs3: %s: ino=%lx,%s %pV\n", KERN_SOH_ASCII, level,
               sb->s_id, inode->i_ino, name ? name : "", &vaf);

        va_end(args);

        atomic_inc(&s_name_buf_cnt);
        if (name != s_name_buf)
                kfree(name);
}
#endif

/*
 * Shared memory struct.
 *
 * The on-disk NTFS upcase table is created by the NTFS formatter.
 * The 'upcase' table is 128K bytes of memory.
 * We should read it into memory when mounting.
 * Several NTFS volumes are likely to use the same 'upcase' table.
 * It is a good idea to share the in-memory 'upcase' table between volumes.
 * Unfortunately, WinXP/Vista/Win7 use different upcase tables.
 */
static DEFINE_SPINLOCK(s_shared_lock);

static struct {
        void *ptr;
        u32 len;
        int cnt;
} s_shared[8];

/*
 * ntfs_set_shared
 *
 * Return:
 * * @ptr - If pointer was saved in shared memory.
 * * NULL - If pointer was not shared.
 */
void *ntfs_set_shared(void *ptr, u32 bytes)
{
        void *ret = NULL;
        int i, j = -1;

        spin_lock(&s_shared_lock);
        for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
                if (!s_shared[i].cnt) {
                        j = i;
                } else if (bytes == s_shared[i].len &&
                           !memcmp(s_shared[i].ptr, ptr, bytes)) {
                        s_shared[i].cnt += 1;
                        ret = s_shared[i].ptr;
                        break;
                }
        }

        if (!ret && j != -1) {
                s_shared[j].ptr = ptr;
                s_shared[j].len = bytes;
                s_shared[j].cnt = 1;
                ret = ptr;
        }
        spin_unlock(&s_shared_lock);

        return ret;
}

/*
 * ntfs_put_shared
 *
 * Return:
 * * @ptr - If pointer is not shared anymore.
 * * NULL - If pointer is still shared.
 */
void *ntfs_put_shared(void *ptr)
{
        void *ret = ptr;
        int i;

        spin_lock(&s_shared_lock);
        for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
                if (s_shared[i].cnt && s_shared[i].ptr == ptr) {
                        if (--s_shared[i].cnt)
                                ret = NULL;
                        break;
                }
        }
        spin_unlock(&s_shared_lock);

        return ret;
}
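/*
 * Typical usage of the pair above (illustrative; see put_ntfs() and
 * ntfs_fill_super() below): the mount path calls ntfs_set_shared() on a
 * freshly loaded table and frees its private copy when an identical table
 * is already shared; unmount calls ntfs_put_shared() and frees the memory
 * only when a non-NULL pointer (the last reference) is returned.
 */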

static inline void put_mount_options(struct ntfs_mount_options *options)
{
        kfree(options->nls_name);
        unload_nls(options->nls);
        kfree(options);
}

enum Opt {
        Opt_uid,
        Opt_gid,
        Opt_umask,
        Opt_dmask,
        Opt_fmask,
        Opt_immutable,
        Opt_discard,
        Opt_force,
        Opt_sparse,
        Opt_nohidden,
        Opt_showmeta,
        Opt_acl,
        Opt_iocharset,
        Opt_prealloc,
        Opt_noacsrules,
        Opt_err,
};

static const struct fs_parameter_spec ntfs_fs_parameters[] = {
        fsparam_u32("uid", Opt_uid),
        fsparam_u32("gid", Opt_gid),
        fsparam_u32oct("umask", Opt_umask),
        fsparam_u32oct("dmask", Opt_dmask),
        fsparam_u32oct("fmask", Opt_fmask),
        fsparam_flag_no("sys_immutable", Opt_immutable),
        fsparam_flag_no("discard", Opt_discard),
        fsparam_flag_no("force", Opt_force),
        fsparam_flag_no("sparse", Opt_sparse),
        fsparam_flag_no("hidden", Opt_nohidden),
        fsparam_flag_no("acl", Opt_acl),
        fsparam_flag_no("showmeta", Opt_showmeta),
        fsparam_flag_no("prealloc", Opt_prealloc),
        fsparam_flag_no("acsrules", Opt_noacsrules),
        fsparam_string("iocharset", Opt_iocharset),

        __fsparam(fs_param_is_string,
                  "nls", Opt_iocharset,
                  fs_param_deprecated, NULL),
        {}
};
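/*
 * Example mount invocation exercising these parameters (illustrative only;
 * the device and mount point are placeholders):
 *
 *   mount -t ntfs3 -o uid=1000,gid=1000,fmask=0133,dmask=0022,iocharset=utf8 /dev/sdX1 /mnt
 */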

/*
 * Load the NLS table, or return NULL if @nls is "utf8".
 */
static struct nls_table *ntfs_load_nls(char *nls)
{
        struct nls_table *ret;

        if (!nls)
                nls = CONFIG_NLS_DEFAULT;

        if (strcmp(nls, "utf8") == 0)
                return NULL;

        if (strcmp(nls, CONFIG_NLS_DEFAULT) == 0)
                return load_nls_default();

        ret = load_nls(nls);
        if (ret)
                return ret;

        return ERR_PTR(-EINVAL);
}
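/*
 * For instance (illustrative): iocharset=ascii makes this return the result
 * of load_nls("ascii"), while iocharset=utf8 returns NULL so the caller
 * handles names as UTF-8 without an nls table.
 */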

static int ntfs_fs_parse_param(struct fs_context *fc,
                               struct fs_parameter *param)
{
        struct ntfs_mount_options *opts = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, ntfs_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_uid:
                opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(opts->fs_uid))
                        return invalf(fc, "ntfs3: Invalid value for uid.");
                break;
        case Opt_gid:
                opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(opts->fs_gid))
                        return invalf(fc, "ntfs3: Invalid value for gid.");
                break;
        case Opt_umask:
                if (result.uint_32 & ~07777)
                        return invalf(fc, "ntfs3: Invalid value for umask.");
                opts->fs_fmask_inv = ~result.uint_32;
                opts->fs_dmask_inv = ~result.uint_32;
                opts->fmask = 1;
                opts->dmask = 1;
                break;
        case Opt_dmask:
                if (result.uint_32 & ~07777)
                        return invalf(fc, "ntfs3: Invalid value for dmask.");
                opts->fs_dmask_inv = ~result.uint_32;
                opts->dmask = 1;
                break;
        case Opt_fmask:
                if (result.uint_32 & ~07777)
                        return invalf(fc, "ntfs3: Invalid value for fmask.");
                opts->fs_fmask_inv = ~result.uint_32;
                opts->fmask = 1;
                break;
        case Opt_immutable:
                opts->sys_immutable = result.negated ? 0 : 1;
                break;
        case Opt_discard:
                opts->discard = result.negated ? 0 : 1;
                break;
        case Opt_force:
                opts->force = result.negated ? 0 : 1;
                break;
        case Opt_sparse:
                opts->sparse = result.negated ? 0 : 1;
                break;
        case Opt_nohidden:
                opts->nohidden = result.negated ? 1 : 0;
                break;
        case Opt_acl:
                if (!result.negated)
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
                        fc->sb_flags |= SB_POSIXACL;
#else
                        return invalf(fc, "ntfs3: Support for ACL not compiled in!");
#endif
                else
                        fc->sb_flags &= ~SB_POSIXACL;
                break;
        case Opt_showmeta:
                opts->showmeta = result.negated ? 0 : 1;
                break;
        case Opt_iocharset:
                kfree(opts->nls_name);
                opts->nls_name = param->string;
                param->string = NULL;
                break;
        case Opt_prealloc:
                opts->prealloc = result.negated ? 0 : 1;
                break;
        case Opt_noacsrules:
                opts->noacsrules = result.negated ? 1 : 0;
                break;
        default:
                /* Should not be here unless we forgot to add a case. */
                return -EINVAL;
        }
        return 0;
}

static int ntfs_fs_reconfigure(struct fs_context *fc)
{
        struct super_block *sb = fc->root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_mount_options *new_opts = fc->fs_private;
        int ro_rw;

        ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
        if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
                errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
                return -EINVAL;
        }

        new_opts->nls = ntfs_load_nls(new_opts->nls_name);
        if (IS_ERR(new_opts->nls)) {
                new_opts->nls = NULL;
                errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name);
                return -EINVAL;
        }
        if (new_opts->nls != sbi->options->nls)
                return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!");

        sync_filesystem(sb);

        if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
            !new_opts->force) {
                errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!");
                return -EINVAL;
        }

        memcpy(sbi->options, new_opts, sizeof(*new_opts));

        return 0;
}

static struct kmem_cache *ntfs_inode_cachep;

static struct inode *ntfs_alloc_inode(struct super_block *sb)
{
        struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);

        if (!ni)
                return NULL;

        memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));

        mutex_init(&ni->ni_lock);

        return &ni->vfs_inode;
}

static void ntfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ntfs_inode *ni = ntfs_i(inode);

        mutex_destroy(&ni->ni_lock);

        kmem_cache_free(ntfs_inode_cachep, ni);
}

static void ntfs_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, ntfs_i_callback);
}

static void init_once(void *foo)
{
        struct ntfs_inode *ni = foo;

        inode_init_once(&ni->vfs_inode);
}

/*
 * put_ntfs - Noinline to reduce binary size.
 */
static noinline void put_ntfs(struct ntfs_sb_info *sbi)
{
        kfree(sbi->new_rec);
        kvfree(ntfs_put_shared(sbi->upcase));
        kfree(sbi->def_table);

        wnd_close(&sbi->mft.bitmap);
        wnd_close(&sbi->used.bitmap);

        if (sbi->mft.ni)
                iput(&sbi->mft.ni->vfs_inode);

        if (sbi->security.ni)
                iput(&sbi->security.ni->vfs_inode);

        if (sbi->reparse.ni)
                iput(&sbi->reparse.ni->vfs_inode);

        if (sbi->objid.ni)
                iput(&sbi->objid.ni->vfs_inode);

        if (sbi->volume.ni)
                iput(&sbi->volume.ni->vfs_inode);

        ntfs_update_mftmirr(sbi, 0);

        indx_clear(&sbi->security.index_sii);
        indx_clear(&sbi->security.index_sdh);
        indx_clear(&sbi->reparse.index_r);
        indx_clear(&sbi->objid.index_o);
        kfree(sbi->compress.lznt);
#ifdef CONFIG_NTFS3_LZX_XPRESS
        xpress_free_decompressor(sbi->compress.xpress);
        lzx_free_decompressor(sbi->compress.lzx);
#endif
        kfree(sbi);
}

static void ntfs_put_super(struct super_block *sb)
{
        struct ntfs_sb_info *sbi = sb->s_fs_info;

        /* Mark rw ntfs as clear, if possible. */
        ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);

        put_mount_options(sbi->options);
        put_ntfs(sbi);
        sb->s_fs_info = NULL;

        sync_blockdev(sb->s_bdev);
}

static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;

        buf->f_type = sb->s_magic;
        buf->f_bsize = sbi->cluster_size;
        buf->f_blocks = wnd->nbits;

        buf->f_bfree = buf->f_bavail = wnd_zeroes(wnd);
        buf->f_fsid.val[0] = sbi->volume.ser_num;
        buf->f_fsid.val[1] = (sbi->volume.ser_num >> 32);
        buf->f_namelen = NTFS_NAME_LEN;

        return 0;
}

static int ntfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct super_block *sb = root->d_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_mount_options *opts = sbi->options;
        struct user_namespace *user_ns = seq_user_ns(m);

        seq_printf(m, ",uid=%u",
                   from_kuid_munged(user_ns, opts->fs_uid));
        seq_printf(m, ",gid=%u",
                   from_kgid_munged(user_ns, opts->fs_gid));
        if (opts->fmask)
                seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
        if (opts->dmask)
                seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
        if (opts->nls)
                seq_printf(m, ",iocharset=%s", opts->nls->charset);
        else
                seq_puts(m, ",iocharset=utf8");
        if (opts->sys_immutable)
                seq_puts(m, ",sys_immutable");
        if (opts->discard)
                seq_puts(m, ",discard");
        if (opts->sparse)
                seq_puts(m, ",sparse");
        if (opts->showmeta)
                seq_puts(m, ",showmeta");
        if (opts->nohidden)
                seq_puts(m, ",nohidden");
        if (opts->force)
                seq_puts(m, ",force");
        if (opts->noacsrules)
                seq_puts(m, ",noacsrules");
        if (opts->prealloc)
                seq_puts(m, ",prealloc");
        if (sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");

        return 0;
}

/*
 * ntfs_sync_fs - super_operations::sync_fs
 */
static int ntfs_sync_fs(struct super_block *sb, int wait)
{
        int err = 0, err2;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_inode *ni;
        struct inode *inode;

        ni = sbi->security.ni;
        if (ni) {
                inode = &ni->vfs_inode;
                err2 = _ni_write_inode(inode, wait);
                if (err2 && !err)
                        err = err2;
        }

        ni = sbi->objid.ni;
        if (ni) {
                inode = &ni->vfs_inode;
                err2 = _ni_write_inode(inode, wait);
                if (err2 && !err)
                        err = err2;
        }

        ni = sbi->reparse.ni;
        if (ni) {
                inode = &ni->vfs_inode;
                err2 = _ni_write_inode(inode, wait);
                if (err2 && !err)
                        err = err2;
        }

        if (!err)
                ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);

        ntfs_update_mftmirr(sbi, wait);

        return err;
}

static const struct super_operations ntfs_sops = {
        .alloc_inode = ntfs_alloc_inode,
        .destroy_inode = ntfs_destroy_inode,
        .evict_inode = ntfs_evict_inode,
        .put_super = ntfs_put_super,
        .statfs = ntfs_statfs,
        .show_options = ntfs_show_options,
        .sync_fs = ntfs_sync_fs,
        .write_inode = ntfs3_write_inode,
};

static struct inode *ntfs_export_get_inode(struct super_block *sb, u64 ino,
                                           u32 generation)
{
        struct MFT_REF ref;
        struct inode *inode;

        ref.low = cpu_to_le32(ino);
#ifdef CONFIG_NTFS3_64BIT_CLUSTER
        ref.high = cpu_to_le16(ino >> 32);
#else
        ref.high = 0;
#endif
        ref.seq = cpu_to_le16(generation);

        inode = ntfs_iget5(sb, &ref, NULL);
        if (!IS_ERR(inode) && is_bad_inode(inode)) {
                iput(inode);
                inode = ERR_PTR(-ESTALE);
        }

        return inode;
}

static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    ntfs_export_get_inode);
}

static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    ntfs_export_get_inode);
}

/* TODO: == ntfs_sync_inode */
static int ntfs_nfs_commit_metadata(struct inode *inode)
{
        return _ni_write_inode(inode, 1);
}

static const struct export_operations ntfs_export_ops = {
        .fh_to_dentry = ntfs_fh_to_dentry,
        .fh_to_parent = ntfs_fh_to_parent,
        .get_parent = ntfs3_get_parent,
        .commit_metadata = ntfs_nfs_commit_metadata,
};

/*
 * format_size_gb - Return Gb,Mb to print with "%u.%02u Gb".
 */
static u32 format_size_gb(const u64 bytes, u32 *mb)
{
        /* Do simple right 30 bit shift of 64 bit value. */
        u64 kbytes = bytes >> 10;
        u32 kbytes32 = kbytes;

        *mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
        if (*mb >= 100)
                *mb = 99;

        return (kbytes32 >> 20) | (((u32)(kbytes >> 32)) << 12);
}
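/*
 * Worked example for the helper above (illustrative): bytes = 0x60000000
 * (1.5 GiB) gives kbytes = 0x180000, so the return value is 1 and *mb is
 * computed as (100 * 0x80000 + 0x7ffff) >> 20 = 50, printed as "1.50 Gb".
 */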

static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
{
        return boot->sectors_per_clusters <= 0x80
                       ? boot->sectors_per_clusters
                       : (1u << (0 - boot->sectors_per_clusters));
}
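/*
 * Note (informational, per the NTFS on-disk layout): values above 0x80 in
 * sectors_per_clusters are a negative power-of-two shift, e.g. 0xF8
 * (-8 as s8) describes 2^8 = 256 sectors per cluster.
 */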

/*
 * ntfs_init_from_boot - Init internal info from on-disk boot sector.
 */
static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
                               u64 dev_size)
{
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        int err;
        u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
        u64 sectors, clusters, fs_size, mlcn, mlcn2;
        struct NTFS_BOOT *boot;
        struct buffer_head *bh;
        struct MFT_REC *rec;
        u16 fn, ao;

        sbi->volume.blocks = dev_size >> PAGE_SHIFT;

        bh = ntfs_bread(sb, 0);
        if (!bh)
                return -EIO;

        err = -EINVAL;
        boot = (struct NTFS_BOOT *)bh->b_data;

        if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1))
                goto out;

        /* 0x55AA is not mandatory. Thanks Maxim Suhanov. */
        /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1])
         *      goto out;
         */

        boot_sector_size = (u32)boot->bytes_per_sector[1] << 8;
        if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE ||
            !is_power_of_2(boot_sector_size)) {
                goto out;
        }

        /* cluster size: 512, 1K, 2K, 4K, ... 2M */
        sct_per_clst = true_sectors_per_clst(boot);
        if (!is_power_of_2(sct_per_clst))
                goto out;

        mlcn = le64_to_cpu(boot->mft_clst);
        mlcn2 = le64_to_cpu(boot->mft2_clst);
        sectors = le64_to_cpu(boot->sectors_per_volume);

        if (mlcn * sct_per_clst >= sectors)
                goto out;

        if (mlcn2 * sct_per_clst >= sectors)
                goto out;

        /* Check MFT record size. */
        if ((boot->record_size < 0 &&
             SECTOR_SIZE > (2U << (-boot->record_size))) ||
            (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) {
                goto out;
        }

        /* Check index record size. */
        if ((boot->index_size < 0 &&
             SECTOR_SIZE > (2U << (-boot->index_size))) ||
            (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) {
                goto out;
        }

        sbi->sector_size = boot_sector_size;
        sbi->sector_bits = blksize_bits(boot_sector_size);
        fs_size = (sectors + 1) << sbi->sector_bits;

        gb = format_size_gb(fs_size, &mb);

        /*
         * - Volume formatted and mounted with the same sector size.
         * - Volume formatted 4K and mounted as 512.
         * - Volume formatted 512 and mounted as 4K.
         */
        if (sbi->sector_size != sector_size) {
                ntfs_warn(sb,
                          "Different NTFS' sector size and media sector size");
                dev_size += sector_size - 1;
        }

        sbi->cluster_size = boot_sector_size * sct_per_clst;
        sbi->cluster_bits = blksize_bits(sbi->cluster_size);

        sbi->mft.lbo = mlcn << sbi->cluster_bits;
        sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;

        if (sbi->cluster_size < sbi->sector_size)
                goto out;

        sbi->cluster_mask = sbi->cluster_size - 1;
        sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
        sbi->record_size = record_size = boot->record_size < 0
                                                 ? 1 << (-boot->record_size)
                                                 : (u32)boot->record_size
                                                           << sbi->cluster_bits;

        if (record_size > MAXIMUM_BYTES_PER_MFT)
                goto out;

        sbi->record_bits = blksize_bits(record_size);
        sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes

        sbi->max_bytes_per_attr =
                record_size - ALIGN(MFTRECORD_FIXUP_OFFSET_1, 8) -
                ALIGN(((record_size >> SECTOR_SHIFT) * sizeof(short)), 8) -
                ALIGN(sizeof(enum ATTR_TYPE), 8);

        sbi->index_size = boot->index_size < 0
                                  ? 1u << (-boot->index_size)
                                  : (u32)boot->index_size << sbi->cluster_bits;

        sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
        sbi->volume.size = sectors << sbi->sector_bits;

        /* Warning if RAW volume. */
        if (dev_size < fs_size) {
                u32 mb0, gb0;

                gb0 = format_size_gb(dev_size, &mb0);
                ntfs_warn(
                        sb,
                        "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
                        gb, mb, gb0, mb0);
                sb->s_flags |= SB_RDONLY;
        }

        clusters = sbi->volume.size >> sbi->cluster_bits;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
        /* 32 bits per cluster. */
        if (clusters >> 32) {
                ntfs_notice(
                        sb,
                        "NTFS %u.%02u Gb is too big to use 32 bits per cluster",
                        gb, mb);
                goto out;
        }
#elif BITS_PER_LONG < 64
#error "CONFIG_NTFS3_64BIT_CLUSTER incompatible in 32 bit OS"
#endif

        sbi->used.bitmap.nbits = clusters;

        rec = kzalloc(record_size, GFP_NOFS);
        if (!rec) {
                err = -ENOMEM;
                goto out;
        }

        sbi->new_rec = rec;
        rec->rhdr.sign = NTFS_FILE_SIGNATURE;
        rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
        fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
        rec->rhdr.fix_num = cpu_to_le16(fn);
        ao = ALIGN(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn, 8);
        rec->attr_off = cpu_to_le16(ao);
        rec->used = cpu_to_le32(ao + ALIGN(sizeof(enum ATTR_TYPE), 8));
        rec->total = cpu_to_le32(sbi->record_size);
        ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;

        if (sbi->cluster_size < PAGE_SIZE)
                sb_set_blocksize(sb, sbi->cluster_size);

        sbi->block_mask = sb->s_blocksize - 1;
        sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
        sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;

        /* Maximum size for normal files. */
        sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;

#ifdef CONFIG_NTFS3_64BIT_CLUSTER
        if (clusters >= (1ull << (64 - sbi->cluster_bits)))
                sbi->maxbytes = -1;
        sbi->maxbytes_sparse = -1;
#else
        /* Maximum size for sparse file. */
        sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
#endif

        err = 0;

out:
        brelse(bh);

        return err;
}

/*
 * ntfs_fill_super - Try to mount.
 */
static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        int err;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct block_device *bdev = sb->s_bdev;
        struct request_queue *rq = bdev_get_queue(bdev);
        struct inode *inode;
        struct ntfs_inode *ni;
        size_t i, tt;
        CLST vcn, lcn, len;
        struct ATTRIB *attr;
        const struct VOLUME_INFO *info;
        u32 idx, done, bytes;
        struct ATTR_DEF_ENTRY *t;
        u16 *upcase;
        u16 *shared;
        struct MFT_REF ref;

        ref.high = 0;

        sbi->sb = sb;
        sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = 0x7366746e; // "ntfs"
        sb->s_op = &ntfs_sops;
        sb->s_export_op = &ntfs_export_ops;
        sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
        sb->s_xattr = ntfs_xattr_handlers;

        sbi->options->nls = ntfs_load_nls(sbi->options->nls_name);
        if (IS_ERR(sbi->options->nls)) {
                sbi->options->nls = NULL;
                errorf(fc, "Cannot load nls %s", sbi->options->nls_name);
                return -EINVAL;
        }

        if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
                ;
        } else {
                sbi->discard_granularity = rq->limits.discard_granularity;
                sbi->discard_granularity_mask_inv =
                        ~(u64)(sbi->discard_granularity - 1);
        }

        sb_set_blocksize(sb, PAGE_SIZE);

        /* Parse boot. */
        err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
                                  bdev->bd_inode->i_size);
        if (err)
                return err;

#ifdef CONFIG_NTFS3_64BIT_CLUSTER
        sb->s_maxbytes = MAX_LFS_FILESIZE;
#else
        sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
#endif

        /*
         * Load $Volume. This should be done before $LogFile
         * 'cause 'sbi->volume.ni' is used by 'ntfs_set_state'.
         */
        ref.low = cpu_to_le32(MFT_REC_VOL);
        ref.seq = cpu_to_le16(MFT_REC_VOL);
        inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $Volume.");
                return PTR_ERR(inode);
        }

        ni = ntfs_i(inode);

        /* Load and save label (not necessary). */
        attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);

        if (!attr) {
                /* It is ok if no ATTR_LABEL */
        } else if (!attr->non_res && !is_attr_ext(attr)) {
                /* $AttrDef allows labels to be up to 128 symbols. */
                err = utf16s_to_utf8s(resident_data(attr),
                                      le32_to_cpu(attr->res.data_size) >> 1,
                                      UTF16_LITTLE_ENDIAN, sbi->volume.label,
                                      sizeof(sbi->volume.label));
                if (err < 0)
                        sbi->volume.label[0] = 0;
        } else {
                /* Should we break mounting here? */
                //err = -EINVAL;
                //goto out;
        }

        attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
        if (!attr || is_attr_ext(attr)) {
                err = -EINVAL;
                goto out;
        }

        info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
        if (!info) {
                err = -EINVAL;
                goto out;
        }

        sbi->volume.major_ver = info->major_ver;
        sbi->volume.minor_ver = info->minor_ver;
        sbi->volume.flags = info->flags;
        sbi->volume.ni = ni;

        /* Load $MFTMirr to estimate recs_mirr. */
        ref.low = cpu_to_le32(MFT_REC_MIRR);
        ref.seq = cpu_to_le16(MFT_REC_MIRR);
        inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $MFTMirr.");
                return PTR_ERR(inode);
        }

        sbi->mft.recs_mirr =
                ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits;

        iput(inode);

        /* Load LogFile to replay. */
        ref.low = cpu_to_le32(MFT_REC_LOG);
        ref.seq = cpu_to_le16(MFT_REC_LOG);
        inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load \x24LogFile.");
                return PTR_ERR(inode);
        }

        ni = ntfs_i(inode);

        err = ntfs_loadlog_and_replay(ni, sbi);
        if (err)
                goto out;

        iput(inode);

        if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
                if (!sb_rdonly(sb)) {
                        ntfs_warn(sb,
                                  "failed to replay log file. Can't mount rw!");
                        return -EINVAL;
                }
        } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
                if (!sb_rdonly(sb) && !sbi->options->force) {
                        ntfs_warn(
                                sb,
                                "volume is dirty and \"force\" flag is not set!");
                        return -EINVAL;
                }
        }

        /* Load $MFT. */
        ref.low = cpu_to_le32(MFT_REC_MFT);
        ref.seq = cpu_to_le16(1);

        inode = ntfs_iget5(sb, &ref, &NAME_MFT);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $MFT.");
                return PTR_ERR(inode);
        }

        ni = ntfs_i(inode);

        sbi->mft.used = ni->i_valid >> sbi->record_bits;
        tt = inode->i_size >> sbi->record_bits;
        sbi->mft.next_free = MFT_REC_USER;

        err = wnd_init(&sbi->mft.bitmap, sb, tt);
        if (err)
                goto out;

        err = ni_load_all_mi(ni);
        if (err)
                goto out;

        sbi->mft.ni = ni;

        /* Load $BadClus. */
        ref.low = cpu_to_le32(MFT_REC_BADCLUST);
        ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
        inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $BadClus.");
                return PTR_ERR(inode);
        }

        ni = ntfs_i(inode);

        for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
                if (lcn == SPARSE_LCN)
                        continue;

                if (!sbi->bad_clusters)
                        ntfs_notice(sb, "Volume contains bad blocks");

                sbi->bad_clusters += len;
        }

        iput(inode);

        /* Load $Bitmap. */
        ref.low = cpu_to_le32(MFT_REC_BITMAP);
        ref.seq = cpu_to_le16(MFT_REC_BITMAP);
        inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $Bitmap.");
                return PTR_ERR(inode);
        }

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
        if (inode->i_size >> 32) {
                err = -EINVAL;
                goto out;
        }
#endif

        /* Check bitmap boundary. */
        tt = sbi->used.bitmap.nbits;
        if (inode->i_size < bitmap_size(tt)) {
                err = -EINVAL;
                goto out;
        }

        /* Not necessary. */
        sbi->used.bitmap.set_tail = true;
        err = wnd_init(&sbi->used.bitmap, sb, tt);
        if (err)
                goto out;

        iput(inode);

        /* Compute the MFT zone. */
        err = ntfs_refresh_zone(sbi);
        if (err)
                return err;

        /* Load $AttrDef. */
        ref.low = cpu_to_le32(MFT_REC_ATTR);
        ref.seq = cpu_to_le16(MFT_REC_ATTR);
        inode = ntfs_iget5(sb, &ref, &NAME_ATTRDEF);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
                return PTR_ERR(inode);
        }

        if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
                err = -EINVAL;
                goto out;
        }
        bytes = inode->i_size;
        sbi->def_table = t = kmalloc(bytes, GFP_NOFS);
        if (!t) {
                err = -ENOMEM;
                goto out;
        }

        for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
                unsigned long tail = bytes - done;
                struct page *page = ntfs_map_page(inode->i_mapping, idx);

                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto out;
                }
                memcpy(Add2Ptr(t, done), page_address(page),
                       min(PAGE_SIZE, tail));
                ntfs_unmap_page(page);

                if (!idx && ATTR_STD != t->type) {
                        err = -EINVAL;
                        goto out;
                }
        }

        t += 1;
        sbi->def_entries = 1;
        done = sizeof(struct ATTR_DEF_ENTRY);
        sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
        sbi->ea_max_size = 0x10000; /* default formatter value */

        while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
                u32 t32 = le32_to_cpu(t->type);
                u64 sz = le64_to_cpu(t->max_sz);

                if ((t32 & 0xF) || le32_to_cpu(t[-1].type) >= t32)
                        break;

                if (t->type == ATTR_REPARSE)
                        sbi->reparse.max_size = sz;
                else if (t->type == ATTR_EA)
                        sbi->ea_max_size = sz;

                done += sizeof(struct ATTR_DEF_ENTRY);
                t += 1;
                sbi->def_entries += 1;
        }
        iput(inode);

        /* Load $UpCase. */
        ref.low = cpu_to_le32(MFT_REC_UPCASE);
        ref.seq = cpu_to_le16(MFT_REC_UPCASE);
        inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load $UpCase.");
                return PTR_ERR(inode);
        }

        if (inode->i_size != 0x10000 * sizeof(short)) {
                err = -EINVAL;
                goto out;
        }

        upcase = sbi->upcase;

        for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
                const __le16 *src;
                u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
                struct page *page = ntfs_map_page(inode->i_mapping, idx);

                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto out;
                }

                src = page_address(page);

#ifdef __BIG_ENDIAN
                for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
                        *dst++ = le16_to_cpu(*src++);
#else
                memcpy(dst, src, PAGE_SIZE);
#endif
                ntfs_unmap_page(page);
        }

        shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
        if (shared && upcase != shared) {
                sbi->upcase = shared;
                kvfree(upcase);
        }

        iput(inode);

        if (is_ntfs3(sbi)) {
                /* Load $Secure. */
                err = ntfs_security_init(sbi);
                if (err)
                        return err;

                /* Load $Extend. */
                err = ntfs_extend_init(sbi);
                if (err)
                        goto load_root;

                /* Load $Extend\$Reparse. */
                err = ntfs_reparse_init(sbi);
                if (err)
                        goto load_root;

                /* Load $Extend\$ObjId. */
                err = ntfs_objid_init(sbi);
                if (err)
                        goto load_root;
        }

load_root:
        /* Load root. */
        ref.low = cpu_to_le32(MFT_REC_ROOT);
        ref.seq = cpu_to_le16(MFT_REC_ROOT);
        inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
        if (IS_ERR(inode)) {
                ntfs_err(sb, "Failed to load root.");
                return PTR_ERR(inode);
        }

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;

        fc->fs_private = NULL;
        fc->s_fs_info = NULL;

        return 0;
out:
        iput(inode);
        return err;
}

void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
{
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct block_device *bdev = sb->s_bdev;
        sector_t devblock = (u64)lcn * sbi->blocks_per_cluster;
        unsigned long blocks = (u64)len * sbi->blocks_per_cluster;
        unsigned long cnt = 0;
        unsigned long limit = global_zone_page_state(NR_FREE_PAGES)
                              << (PAGE_SHIFT - sb->s_blocksize_bits);

        if (limit >= 0x2000)
                limit -= 0x1000;
        else if (limit < 32)
                limit = 32;
        else
                limit >>= 1;

        while (blocks--) {
                clean_bdev_aliases(bdev, devblock++, 1);
                if (cnt++ >= limit) {
                        sync_blockdev(bdev);
                        cnt = 0;
                }
        }
}

/*
 * ntfs_discard - Issue a discard request (trim for SSD).
 */
int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
{
        int err;
        u64 lbo, bytes, start, end;
        struct super_block *sb;

        if (sbi->used.next_free_lcn == lcn + len)
                sbi->used.next_free_lcn = lcn;

        if (sbi->flags & NTFS_FLAGS_NODISCARD)
                return -EOPNOTSUPP;

        if (!sbi->options->discard)
                return -EOPNOTSUPP;

        lbo = (u64)lcn << sbi->cluster_bits;
        bytes = (u64)len << sbi->cluster_bits;

        /* Align up 'start' on discard_granularity. */
        start = (lbo + sbi->discard_granularity - 1) &
                sbi->discard_granularity_mask_inv;
        /* Align down 'end' on discard_granularity. */
        end = (lbo + bytes) & sbi->discard_granularity_mask_inv;

        sb = sbi->sb;
        if (start >= end)
                return 0;

        err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
                                   GFP_NOFS, 0);

        if (err == -EOPNOTSUPP)
                sbi->flags |= NTFS_FLAGS_NODISCARD;

        return err;
}
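/*
 * Alignment example for ntfs_discard() (illustrative): with a 64K
 * discard_granularity, lbo = 0x11000 and bytes = 0x30000 round to
 * start = 0x20000 and end = 0x40000, so only the fully covered 128K
 * in the middle of the range is discarded.
 */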

static int ntfs_fs_get_tree(struct fs_context *fc)
{
        return get_tree_bdev(fc, ntfs_fill_super);
}

/*
 * ntfs_fs_free - Free fs_context.
 *
 * Note that this will be called after fill_super and reconfigure
 * even when they succeed. So on success they must take ownership of
 * the pointers (clear them in @fc).
 */
static void ntfs_fs_free(struct fs_context *fc)
{
        struct ntfs_mount_options *opts = fc->fs_private;
        struct ntfs_sb_info *sbi = fc->s_fs_info;

        if (sbi)
                put_ntfs(sbi);

        if (opts)
                put_mount_options(opts);
}

static const struct fs_context_operations ntfs_context_ops = {
        .parse_param    = ntfs_fs_parse_param,
        .get_tree       = ntfs_fs_get_tree,
        .reconfigure    = ntfs_fs_reconfigure,
        .free           = ntfs_fs_free,
};

/*
 * ntfs_init_fs_context - Initialize sbi and opts
 *
 * This will be called on mount/remount. We initialize the options first
 * so that a remount can reuse just them.
 */
static int ntfs_init_fs_context(struct fs_context *fc)
{
        struct ntfs_mount_options *opts;
        struct ntfs_sb_info *sbi;

        opts = kzalloc(sizeof(struct ntfs_mount_options), GFP_NOFS);
        if (!opts)
                return -ENOMEM;

        /* Default options. */
        opts->fs_uid = current_uid();
        opts->fs_gid = current_gid();
        opts->fs_fmask_inv = ~current_umask();
        opts->fs_dmask_inv = ~current_umask();

        if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
                goto ok;

        sbi = kzalloc(sizeof(struct ntfs_sb_info), GFP_NOFS);
        if (!sbi)
                goto free_opts;

        sbi->upcase = kvmalloc(0x10000 * sizeof(short), GFP_KERNEL);
        if (!sbi->upcase)
                goto free_sbi;

        ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
                             DEFAULT_RATELIMIT_BURST);

        mutex_init(&sbi->compress.mtx_lznt);
#ifdef CONFIG_NTFS3_LZX_XPRESS
        mutex_init(&sbi->compress.mtx_xpress);
        mutex_init(&sbi->compress.mtx_lzx);
#endif

        sbi->options = opts;
        fc->s_fs_info = sbi;
ok:
        fc->fs_private = opts;
        fc->ops = &ntfs_context_ops;

        return 0;
free_sbi:
        kfree(sbi);
free_opts:
        kfree(opts);
        return -ENOMEM;
}

// clang-format off
static struct file_system_type ntfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "ntfs3",
        .init_fs_context        = ntfs_init_fs_context,
        .parameters             = ntfs_fs_parameters,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
// clang-format on

static int __init init_ntfs_fs(void)
{
        int err;

        pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);

        if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
                pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
        if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
                pr_notice("ntfs3: Warning: Activated 64 bits per cluster. Windows does not support this\n");
        if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
                pr_info("ntfs3: Read-only LZX/Xpress compression included\n");

        err = ntfs3_init_bitmap();
        if (err)
                return err;

        ntfs_inode_cachep = kmem_cache_create(
                "ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
                (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
                init_once);
        if (!ntfs_inode_cachep) {
                err = -ENOMEM;
                goto out1;
        }

        err = register_filesystem(&ntfs_fs_type);
        if (err)
                goto out;

        return 0;
out:
        kmem_cache_destroy(ntfs_inode_cachep);
out1:
        ntfs3_exit_bitmap();
        return err;
}

static void __exit exit_ntfs_fs(void)
{
        if (ntfs_inode_cachep) {
                rcu_barrier();
                kmem_cache_destroy(ntfs_inode_cachep);
        }

        unregister_filesystem(&ntfs_fs_type);
        ntfs3_exit_bitmap();
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ntfs3 read/write filesystem");
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
#endif
#ifdef CONFIG_NTFS3_64BIT_CLUSTER
MODULE_INFO(cluster, "Warning: Activated 64 bits per cluster. Windows does not support this");
#endif
#ifdef CONFIG_NTFS3_LZX_XPRESS
MODULE_INFO(compression, "Read-only lzx/xpress compression included");
#endif

MODULE_AUTHOR("Konstantin Komarov");
MODULE_ALIAS_FS("ntfs3");

module_init(init_ntfs_fs);
module_exit(exit_ntfs_fs);