// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
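
	/*
	 * Sanity-check the update sequence array: fix_num counts the
	 * sequence number itself plus one saved word per sector, so after
	 * "!fn--" below @fn is the number of sectors the record spans,
	 * and those sectors must fit within @bytes.
	 */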
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
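
	/*
	 * Stamp the new sequence number into the last word of each sector,
	 * saving the original word in the update sequence array so that
	 * ntfs_fix_post_read() can restore it.
	 */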
	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
		    : le16_to_cpu(rhdr->fix_num);
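	/*
	 * In "simple" mode the caller trusts @bytes rather than the on-disk
	 * header: one fixup per sector plus the sequence number itself.
	 */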

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL; /* Native chkntfs returns ok! */
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend: NTFS version is older than 3.0");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
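	/*
	 * For NTFS system records the sequence number conventionally equals
	 * the record number, hence seq == MFT_REC_EXTEND here.
	 */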
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend.");
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is too big");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);
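	/* $MFT itself is record 0; its sequence number is always 1. */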

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_query_def
 *
 * Return: Current ATTR_DEF_ENTRY for given attribute type.
 */
const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
					    enum ATTR_TYPE type)
{
	int type_in = le32_to_cpu(type);
	size_t min_idx = 0;
	size_t max_idx = sbi->def_entries - 1;
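
	/*
	 * Entries in $AttrDef are sorted by attribute type, so a plain
	 * binary search over sbi->def_table finds the matching entry.
	 */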
	while (min_idx <= max_idx) {
		size_t i = min_idx + ((max_idx - min_idx) >> 1);
		const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
		int diff = le32_to_cpu(entry->type) - type_in;

		if (!diff)
			return entry;
		if (diff < 0)
			min_idx = i + 1;
		else if (i)
			max_idx = i - 1;
		else
			return NULL;
	}
	return NULL;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Reject requests that are too big. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
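	/*
	 * Worked example (assuming NTFS_MIN_MFT_ZONE is small): with
	 * zlen == 100 and len == 30, ztrim = clamp(30, 50, 100) = 50 and
	 * new_zlen = 100 - 50 = 50, i.e. at least half of the zone is
	 * released for regular allocations.
	 */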

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
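	/*
	 * Grow by at least MFT_INCREASE_CHUNK records and round the new
	 * total up to a multiple of 128 records (128 bits == 16 bytes of
	 * the $MFT::$BITMAP).
	 */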

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Create the internal bitmap for 5 bits once per session. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;
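
		/*
		 * Shrink the candidate window until the whole run
		 * [zbit, zbit + zlen) is actually free.
		 */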
		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST zone_limit, zone_max, lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything while the MFT zone is still non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	/*
	 * Compute the MFT zone in two steps.
	 * It would be nice if we were able to allocate 1/8 of
	 * the total clusters for the MFT, but not more than 512 MB.
	 */
	zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
	zone_max = wnd->nbits >> 3;
	if (zone_max > zone_limit)
		zone_max = zone_limit;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN of the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
	if (!zlen) {
		ntfs_notice(sbi->sb, "MftZone: unavailable");
		return 0;
	}

	/* Truncate too large zone. */
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	sector_t block1, block2;
	u32 bytes;

	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return 0;

	err = 0;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
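	/*
	 * recs_mirr is the number of MFT records mirrored by $MFTMirr
	 * (typically the first four system records).
	 */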
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1) {
			err = -EIO;
			goto out;
		}

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			err = -EIO;
			goto out;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		if (wait)
			err = sync_dirty_buffer(bh2);

		put_bh(bh2);
		if (err)
			goto out;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;

out:
	return err;
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (or clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value: skip the update if the flag already matches. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	sbi->volume.flags = info->flags;
	mi->dirty = true;
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * If we used wait=1, sync_inode_metadata would wait for the io for
	 * the inode to finish, and it hangs when media is removed.
	 * So wait=0 is sent down to sync_inode_metadata
	 * and filemap_fdatawrite is used for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

	return err;
}

/*
 * security_hash - Calculates a hash of a security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;
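
	/*
	 * Rotate-add hash over the descriptor: for every 32-bit word,
	 * rotate the accumulator left by 3 bits and add the word.
	 */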
	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, 0);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;
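
		/*
		 * The run list must be contiguous in VCN space: the next
		 * entry has to start exactly at vcn_next, otherwise the
		 * runlist is corrupt.
		 */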
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(nb->bh[idx]);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);

	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
	}
	return bio;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   u32 op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to a 512-byte boundary. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;
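	/*
	 * For example, vbo == 1000 and bytes == 100 become vbo == 512 and
	 * bytes == 1024: the request is widened to whole 512-byte sectors.
	 */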

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = ntfs_alloc_bio(nr_pages - page_idx);
		if (!new) {
			err = -ENOMEM;
			goto out;
		}
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = lbo >> 9;
		bio->bi_opf = op;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1;
 * this marks the logfile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);
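	/*
	 * A single 0xFF-filled page is added to the bio repeatedly below,
	 * so the whole run is overwritten without allocating a buffer of
	 * the run's size.
	 */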
1588
1589 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1590 err = -ENOENT;
1591 goto out;
1592 }
1593
1594 /*
Kari Argillandere8b8e972021-08-03 14:57:09 +03001595 * TODO: Try blkdev_issue_write_same.
Konstantin Komarov82cae262021-08-13 17:21:29 +03001596 */
1597 blk_start_plug(&plug);
1598 do {
1599 lbo = (u64)lcn << cluster_bits;
1600 len = (u64)clen << cluster_bits;
1601new_bio:
1602 new = ntfs_alloc_bio(BIO_MAX_VECS);
1603 if (!new) {
1604 err = -ENOMEM;
1605 break;
1606 }
1607 if (bio) {
1608 bio_chain(bio, new);
1609 submit_bio(bio);
1610 }
1611 bio = new;
1612 bio_set_dev(bio, bdev);
1613 bio->bi_opf = REQ_OP_WRITE;
1614 bio->bi_iter.bi_sector = lbo >> 9;
1615
1616 for (;;) {
1617 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1618
1619 if (bio_add_page(bio, fill, add, 0) < add)
1620 goto new_bio;
1621
1622 lbo += add;
1623 if (len <= add)
1624 break;
1625 len -= add;
1626 }
1627 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1628
1629 if (bio) {
1630 if (!err)
1631 err = submit_bio_wait(bio);
1632 bio_put(bio);
1633 }
1634 blk_finish_plug(&plug);
1635out:
1636 unlock_page(fill);
1637 put_page(fill);
1638
1639 return err;
1640}
1641
1642int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1643 u64 vbo, u64 *lbo, u64 *bytes)
1644{
1645 u32 off;
1646 CLST lcn, len;
1647 u8 cluster_bits = sbi->cluster_bits;
1648
1649 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1650 return -ENOENT;
1651
1652 off = vbo & sbi->cluster_mask;
1653 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1654 *bytes = ((u64)len << cluster_bits) - off;
1655
1656 return 0;
1657}
1658
1659struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1660{
1661 int err = 0;
1662 struct super_block *sb = sbi->sb;
1663 struct inode *inode = new_inode(sb);
1664 struct ntfs_inode *ni;
1665
1666 if (!inode)
1667 return ERR_PTR(-ENOMEM);
1668
1669 ni = ntfs_i(inode);
1670
1671 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
1672 false);
1673 if (err)
1674 goto out;
1675
1676 inode->i_ino = rno;
1677 if (insert_inode_locked(inode) < 0) {
1678 err = -EIO;
1679 goto out;
1680 }
1681
1682out:
1683 if (err) {
1684 iput(inode);
1685 ni = ERR_PTR(err);
1686 }
1687 return ni;
1688}
1689
1690/*
1691 * O:BAG:BAD:(A;OICI;FA;;;WD)
Kari Argillandere8b8e972021-08-03 14:57:09 +03001692 * Owner S-1-5-32-544 (Administrators)
1693 * Group S-1-5-32-544 (Administrators)
Konstantin Komarov82cae262021-08-13 17:21:29 +03001694 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1695 */
1696const u8 s_default_security[] __aligned(8) = {
1697 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1698 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1699 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1700 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1701 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1702 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1703 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1704};
1705
1706static_assert(sizeof(s_default_security) == 0x50);
1707
1708static inline u32 sid_length(const struct SID *sid)
1709{
1710 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1711}
1712
1713/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03001714 * is_acl_valid
1715 *
1716 * Thanks Mark Harmstone for idea.
Konstantin Komarov82cae262021-08-13 17:21:29 +03001717 */
1718static bool is_acl_valid(const struct ACL *acl, u32 len)
1719{
1720 const struct ACE_HEADER *ace;
1721 u32 i;
1722 u16 ace_count, ace_size;
1723
1724 if (acl->AclRevision != ACL_REVISION &&
1725 acl->AclRevision != ACL_REVISION_DS) {
1726 /*
1727 * This value should be ACL_REVISION, unless the ACL contains an
1728 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1729 * All ACEs in an ACL must be at the same revision level.
1730 */
1731 return false;
1732 }
1733
1734 if (acl->Sbz1)
1735 return false;
1736
1737 if (le16_to_cpu(acl->AclSize) > len)
1738 return false;
1739
1740 if (acl->Sbz2)
1741 return false;
1742
1743 len -= sizeof(struct ACL);
1744 ace = (struct ACE_HEADER *)&acl[1];
1745 ace_count = le16_to_cpu(acl->AceCount);
1746
1747 for (i = 0; i < ace_count; i++) {
1748 if (len < sizeof(struct ACE_HEADER))
1749 return false;
1750
1751 ace_size = le16_to_cpu(ace->AceSize);
1752 if (len < ace_size)
1753 return false;
1754
1755 len -= ace_size;
1756 ace = Add2Ptr(ace, ace_size);
1757 }
1758
1759 return true;
1760}
1761
1762bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1763{
1764 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1765
1766 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1767 return false;
1768
1769 if (sd->Revision != 1)
1770 return false;
1771
1772 if (sd->Sbz1)
1773 return false;
1774
1775 if (!(sd->Control & SE_SELF_RELATIVE))
1776 return false;
1777
1778 sd_owner = le32_to_cpu(sd->Owner);
1779 if (sd_owner) {
1780 const struct SID *owner = Add2Ptr(sd, sd_owner);
1781
1782 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1783 return false;
1784
1785 if (owner->Revision != 1)
1786 return false;
1787
1788 if (sd_owner + sid_length(owner) > len)
1789 return false;
1790 }
1791
1792 sd_group = le32_to_cpu(sd->Group);
1793 if (sd_group) {
1794 const struct SID *group = Add2Ptr(sd, sd_group);
1795
1796 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1797 return false;
1798
1799 if (group->Revision != 1)
1800 return false;
1801
1802 if (sd_group + sid_length(group) > len)
1803 return false;
1804 }
1805
1806 sd_sacl = le32_to_cpu(sd->Sacl);
1807 if (sd_sacl) {
1808 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1809
1810 if (sd_sacl + sizeof(struct ACL) > len)
1811 return false;
1812
1813 if (!is_acl_valid(sacl, len - sd_sacl))
1814 return false;
1815 }
1816
1817 sd_dacl = le32_to_cpu(sd->Dacl);
1818 if (sd_dacl) {
1819 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1820
1821 if (sd_dacl + sizeof(struct ACL) > len)
1822 return false;
1823
1824 if (!is_acl_valid(dacl, len - sd_dacl))
1825 return false;
1826 }
1827
1828 return true;
1829}
1830
1831/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03001832 * ntfs_security_init - Load and parse $Secure.
Konstantin Komarov82cae262021-08-13 17:21:29 +03001833 */
1834int ntfs_security_init(struct ntfs_sb_info *sbi)
1835{
1836 int err;
1837 struct super_block *sb = sbi->sb;
1838 struct inode *inode;
1839 struct ntfs_inode *ni;
1840 struct MFT_REF ref;
1841 struct ATTRIB *attr;
1842 struct ATTR_LIST_ENTRY *le;
1843 u64 sds_size;
Nathan Chancellor8c013082021-08-16 12:30:41 -07001844 size_t off;
Konstantin Komarov82cae262021-08-13 17:21:29 +03001845 struct NTFS_DE *ne;
1846 struct NTFS_DE_SII *sii_e;
1847 struct ntfs_fnd *fnd_sii = NULL;
1848 const struct INDEX_ROOT *root_sii;
1849 const struct INDEX_ROOT *root_sdh;
1850 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1851 struct ntfs_index *indx_sii = &sbi->security.index_sii;
1852
1853 ref.low = cpu_to_le32(MFT_REC_SECURE);
1854 ref.high = 0;
1855 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1856
1857 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1858 if (IS_ERR(inode)) {
1859 err = PTR_ERR(inode);
1860 ntfs_err(sb, "Failed to load $Secure.");
1861 inode = NULL;
1862 goto out;
1863 }
1864
1865 ni = ntfs_i(inode);
1866
1867 le = NULL;
1868
1869 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1870 ARRAY_SIZE(SDH_NAME), NULL, NULL);
1871 if (!attr) {
1872 err = -EINVAL;
1873 goto out;
1874 }
1875
1876 root_sdh = resident_data(attr);
1877 if (root_sdh->type != ATTR_ZERO ||
1878 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
1879 err = -EINVAL;
1880 goto out;
1881 }
1882
1883 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1884 if (err)
1885 goto out;
1886
1887 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1888 ARRAY_SIZE(SII_NAME), NULL, NULL);
1889 if (!attr) {
1890 err = -EINVAL;
1891 goto out;
1892 }
1893
1894 root_sii = resident_data(attr);
1895 if (root_sii->type != ATTR_ZERO ||
1896 root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
1897 err = -EINVAL;
1898 goto out;
1899 }
1900
1901 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1902 if (err)
1903 goto out;
1904
1905 fnd_sii = fnd_get();
1906 if (!fnd_sii) {
1907 err = -ENOMEM;
1908 goto out;
1909 }
1910
1911 sds_size = inode->i_size;
1912
Kari Argillandere8b8e972021-08-03 14:57:09 +03001913 /* Find the last valid Id. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03001914 sbi->security.next_id = SECURITY_ID_FIRST;
Kari Argillandere8b8e972021-08-03 14:57:09 +03001915 /* Always write new security at the end of bucket. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03001916 sbi->security.next_off =
Konstantin Komarovd3624462021-08-31 16:57:40 +03001917 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
Konstantin Komarov82cae262021-08-13 17:21:29 +03001918
Konstantin Komarov82cae262021-08-13 17:21:29 +03001919 off = 0;
1920 ne = NULL;
1921
1922 for (;;) {
1923 u32 next_id;
1924
1925 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1926 if (err || !ne)
1927 break;
1928
1929 sii_e = (struct NTFS_DE_SII *)ne;
1930 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1931 continue;
1932
1933 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1934 if (next_id >= sbi->security.next_id)
1935 sbi->security.next_id = next_id;
Konstantin Komarov82cae262021-08-13 17:21:29 +03001936 }
1937
1938 sbi->security.ni = ni;
1939 inode = NULL;
1940out:
1941 iput(inode);
1942 fnd_put(fnd_sii);
1943
1944 return err;
1945}
1946
1947/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03001948 * ntfs_get_security_by_id - Read security descriptor by id.
Konstantin Komarov82cae262021-08-13 17:21:29 +03001949 */
1950int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1951 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1952 size_t *size)
1953{
1954 int err;
1955 int diff;
1956 struct ntfs_inode *ni = sbi->security.ni;
1957 struct ntfs_index *indx = &sbi->security.index_sii;
1958 void *p = NULL;
1959 struct NTFS_DE_SII *sii_e;
1960 struct ntfs_fnd *fnd_sii;
1961 struct SECURITY_HDR d_security;
1962 const struct INDEX_ROOT *root_sii;
1963 u32 t32;
1964
1965 *sd = NULL;
1966
1967 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1968
1969 fnd_sii = fnd_get();
1970 if (!fnd_sii) {
1971 err = -ENOMEM;
1972 goto out;
1973 }
1974
1975 root_sii = indx_get_root(indx, ni, NULL, NULL);
1976 if (!root_sii) {
1977 err = -EINVAL;
1978 goto out;
1979 }
1980
Kari Argillandere8b8e972021-08-03 14:57:09 +03001981 /* Try to find this SECURITY descriptor in SII indexes. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03001982 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1983 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1984 if (err)
1985 goto out;
1986
1987 if (diff)
1988 goto out;
1989
1990 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1991 if (t32 < SIZEOF_SECURITY_HDR) {
1992 err = -EINVAL;
1993 goto out;
1994 }
1995
1996 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
Kari Argillandere8b8e972021-08-03 14:57:09 +03001997 /* Looks like too big security. 0x10000 - is arbitrary big number. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03001998 err = -EFBIG;
1999 goto out;
2000 }
2001
2002 *size = t32 - SIZEOF_SECURITY_HDR;
2003
Kari Argillander195c52b2021-08-24 21:37:07 +03002004 p = kmalloc(*size, GFP_NOFS);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002005 if (!p) {
2006 err = -ENOMEM;
2007 goto out;
2008 }
2009
2010 err = ntfs_read_run_nb(sbi, &ni->file.run,
2011 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2012 sizeof(d_security), NULL);
2013 if (err)
2014 goto out;
2015
2016 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
2017 err = -EINVAL;
2018 goto out;
2019 }
2020
2021 err = ntfs_read_run_nb(sbi, &ni->file.run,
2022 le64_to_cpu(sii_e->sec_hdr.off) +
2023 SIZEOF_SECURITY_HDR,
2024 p, *size, NULL);
2025 if (err)
2026 goto out;
2027
2028 *sd = p;
2029 p = NULL;
2030
2031out:
Kari Argillander195c52b2021-08-24 21:37:07 +03002032 kfree(p);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002033 fnd_put(fnd_sii);
2034 ni_unlock(ni);
2035
2036 return err;
2037}
2038
2039/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03002040 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
Konstantin Komarov82cae262021-08-13 17:21:29 +03002041 *
2042 * Security Descriptor Stream data is organized into 256K chunks, and
2043 * each security descriptor has a mirror copy. When writing a security
2044 * descriptor at location X, another copy is written at location
2045 * (X+256K).
2046 * When a security descriptor would cross a 256K boundary, the write
2047 * pointer is first advanced by 256K to skip over the mirror
2048 * portion.
2049 */
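/*
 * Sketch of the resulting on-disk layout (illustrative only, inferred
 * from the description above; offsets assume
 * SecurityDescriptorsBlockSize == 0x40000):
 *
 *	0x00000               0x40000               0x80000
 *	+---------------------+---------------------+------------ ...
 *	| primary chunk 0     | mirror of chunk 0   | primary chunk 1
 *	+---------------------+---------------------+------------ ...
 *
 * A descriptor written at offset X < 0x40000 is duplicated at
 * X + 0x40000; once chunk 0 is full, writing continues at 0x80000.
 */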
2050int ntfs_insert_security(struct ntfs_sb_info *sbi,
2051 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2052 u32 size_sd, __le32 *security_id, bool *inserted)
2053{
2054 int err, diff;
2055 struct ntfs_inode *ni = sbi->security.ni;
2056 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2057 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2058 struct NTFS_DE_SDH *e;
2059 struct NTFS_DE_SDH sdh_e;
2060 struct NTFS_DE_SII sii_e;
2061 struct SECURITY_HDR *d_security;
2062 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
Kari Argillanderfa3cacf2021-08-26 11:56:29 +03002063 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002064 struct SECURITY_KEY hash_key;
2065 struct ntfs_fnd *fnd_sdh = NULL;
2066 const struct INDEX_ROOT *root_sdh;
2067 const struct INDEX_ROOT *root_sii;
2068 u64 mirr_off, new_sds_size;
2069 u32 next, left;
2070
2071 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2072 SecurityDescriptorsBlockSize);
2073
2074 hash_key.hash = security_hash(sd, size_sd);
2075 hash_key.sec_id = SECURITY_ID_INVALID;
2076
2077 if (inserted)
2078 *inserted = false;
2079 *security_id = SECURITY_ID_INVALID;
2080
Kari Argillandere8b8e972021-08-03 14:57:09 +03002081	/* Allocate a temporary buffer. */
Kari Argillander195c52b2021-08-24 21:37:07 +03002082 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002083 if (!d_security)
2084 return -ENOMEM;
2085
2086 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2087
2088 fnd_sdh = fnd_get();
2089 if (!fnd_sdh) {
2090 err = -ENOMEM;
2091 goto out;
2092 }
2093
2094 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2095 if (!root_sdh) {
2096 err = -EINVAL;
2097 goto out;
2098 }
2099
2100 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2101 if (!root_sii) {
2102 err = -EINVAL;
2103 goto out;
2104 }
2105
2106 /*
Kari Argillandere8b8e972021-08-03 14:57:09 +03002107 * Check if such security already exists.
2108	 * Use the "SDH" index and the hash to get the offset in "SDS".
Konstantin Komarov82cae262021-08-13 17:21:29 +03002109 */
2110 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2111 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2112 fnd_sdh);
2113 if (err)
2114 goto out;
2115
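	/*
	 * Several different descriptors can share one hash; walk every
	 * SDH entry with this hash and compare the raw descriptor bytes.
	 */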
2116 while (e) {
2117 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2118 err = ntfs_read_run_nb(sbi, &ni->file.run,
2119 le64_to_cpu(e->sec_hdr.off),
2120 d_security, new_sec_size, NULL);
2121 if (err)
2122 goto out;
2123
2124 if (le32_to_cpu(d_security->size) == new_sec_size &&
2125 d_security->key.hash == hash_key.hash &&
2126 !memcmp(d_security + 1, sd, size_sd)) {
2127 *security_id = d_security->key.sec_id;
Kari Argillandere8b8e972021-08-03 14:57:09 +03002128				/* An identical security descriptor already exists. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002129 err = 0;
2130 goto out;
2131 }
2132 }
2133
2134 err = indx_find_sort(indx_sdh, ni, root_sdh,
2135 (struct NTFS_DE **)&e, fnd_sdh);
2136 if (err)
2137 goto out;
2138
2139 if (!e || e->key.hash != hash_key.hash)
2140 break;
2141 }
2142
Kari Argillandere8b8e972021-08-03 14:57:09 +03002143	/* Compute the space left in the current 256K block. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002144	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2145	left = SecurityDescriptorsBlockSize - next;
2146
Kari Argillandere8b8e972021-08-03 14:57:09 +03002147	/* Skip ahead if the new descriptor does not fit in this block. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002148	if (left < new_sec_size) {
Kari Argillandere8b8e972021-08-03 14:57:09 +03002149		/* Skip the remaining "left" bytes plus the 256K mirror block. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002150 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2151 }
2152
Kari Argillandere8b8e972021-08-03 14:57:09 +03002153 /* Zero tail of previous security. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002154 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2155
2156	/*
2157	 * Example: 0x40438 == ni->vfs_inode.i_size,
2158	 *	    0x00440 == sbi->security.next_off;
2159	 *	    need to zero [0x438-0x440):
2160	 *	if (next > used) {
2161	 *		u32 tozero = next - used;
2162	 *		zero "tozero" bytes from (sbi->security.next_off - tozero);
2163	 *	}
2164	 */
2165
Kari Argillandere8b8e972021-08-03 14:57:09 +03002166 /* Format new security descriptor. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002167 d_security->key.hash = hash_key.hash;
2168 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2169 d_security->off = cpu_to_le64(sbi->security.next_off);
2170 d_security->size = cpu_to_le32(new_sec_size);
2171 memcpy(d_security + 1, sd, size_sd);
2172
Kari Argillandere8b8e972021-08-03 14:57:09 +03002173 /* Write main SDS bucket. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002174 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2175 d_security, aligned_sec_size);
2176
2177 if (err)
2178 goto out;
2179
2180 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2181 new_sds_size = mirr_off + aligned_sec_size;
2182
2183 if (new_sds_size > ni->vfs_inode.i_size) {
2184 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2185 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2186 new_sds_size, &new_sds_size, false, NULL);
2187 if (err)
2188 goto out;
2189 }
2190
Kari Argillandere8b8e972021-08-03 14:57:09 +03002191 /* Write copy SDS bucket. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002192 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2193 aligned_sec_size);
2194 if (err)
2195 goto out;
2196
Kari Argillandere8b8e972021-08-03 14:57:09 +03002197 /* Fill SII entry. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002198 sii_e.de.view.data_off =
2199 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2200 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2201 sii_e.de.view.res = 0;
2202 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2203 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2204 sii_e.de.flags = 0;
2205 sii_e.de.res = 0;
2206 sii_e.sec_id = d_security->key.sec_id;
2207 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2208
Konstantin Komarov78ab59f2021-08-31 18:52:39 +03002209 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002210 if (err)
2211 goto out;
2212
Kari Argillandere8b8e972021-08-03 14:57:09 +03002213 /* Fill SDH entry. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002214 sdh_e.de.view.data_off =
2215 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2216 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2217 sdh_e.de.view.res = 0;
2218 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2219 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2220 sdh_e.de.flags = 0;
2221 sdh_e.de.res = 0;
2222 sdh_e.key.hash = d_security->key.hash;
2223 sdh_e.key.sec_id = d_security->key.sec_id;
2224 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2225 sdh_e.magic[0] = cpu_to_le16('I');
2226 sdh_e.magic[1] = cpu_to_le16('I');
2227
2228 fnd_clear(fnd_sdh);
2229 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
Konstantin Komarov78ab59f2021-08-31 18:52:39 +03002230 fnd_sdh, 0);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002231 if (err)
2232 goto out;
2233
2234 *security_id = d_security->key.sec_id;
2235 if (inserted)
2236 *inserted = true;
2237
Kari Argillandere8b8e972021-08-03 14:57:09 +03002238 /* Update Id and offset for next descriptor. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002239 sbi->security.next_id += 1;
2240 sbi->security.next_off += aligned_sec_size;
2241
2242out:
2243 fnd_put(fnd_sdh);
2244 mark_inode_dirty(&ni->vfs_inode);
2245 ni_unlock(ni);
Kari Argillander195c52b2021-08-24 21:37:07 +03002246 kfree(d_security);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002247
2248 return err;
2249}
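
/*
 * Typical call sequence (an illustrative sketch only; "std5" stands for
 * the caller's standard information attribute and is not defined here):
 *
 *	__le32 security_id;
 *	bool inserted;
 *	int err = ntfs_insert_security(sbi, sd, size_sd,
 *				       &security_id, &inserted);
 *	if (!err)
 *		std5->security_id = security_id;
 */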
2250
2251/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03002252 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
Konstantin Komarov82cae262021-08-13 17:21:29 +03002253 */
2254int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2255{
2256 int err;
2257 struct ntfs_inode *ni = sbi->reparse.ni;
2258 struct ntfs_index *indx = &sbi->reparse.index_r;
2259 struct ATTRIB *attr;
2260 struct ATTR_LIST_ENTRY *le;
2261 const struct INDEX_ROOT *root_r;
2262
2263 if (!ni)
2264 return 0;
2265
2266 le = NULL;
2267 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2268 ARRAY_SIZE(SR_NAME), NULL, NULL);
2269 if (!attr) {
2270 err = -EINVAL;
2271 goto out;
2272 }
2273
2274 root_r = resident_data(attr);
2275 if (root_r->type != ATTR_ZERO ||
2276 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2277 err = -EINVAL;
2278 goto out;
2279 }
2280
2281 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2282 if (err)
2283 goto out;
2284
2285out:
2286 return err;
2287}
2288
2289/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03002290 * ntfs_objid_init - Load and parse $Extend/$ObjId.
Konstantin Komarov82cae262021-08-13 17:21:29 +03002291 */
2292int ntfs_objid_init(struct ntfs_sb_info *sbi)
2293{
2294 int err;
2295 struct ntfs_inode *ni = sbi->objid.ni;
2296 struct ntfs_index *indx = &sbi->objid.index_o;
2297 struct ATTRIB *attr;
2298 struct ATTR_LIST_ENTRY *le;
2299 const struct INDEX_ROOT *root;
2300
2301 if (!ni)
2302 return 0;
2303
2304 le = NULL;
2305 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2306 ARRAY_SIZE(SO_NAME), NULL, NULL);
2307 if (!attr) {
2308 err = -EINVAL;
2309 goto out;
2310 }
2311
2312 root = resident_data(attr);
2313 if (root->type != ATTR_ZERO ||
2314 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2315 err = -EINVAL;
2316 goto out;
2317 }
2318
2319 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2320 if (err)
2321 goto out;
2322
2323out:
2324 return err;
2325}
2326
2327int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2328{
2329 int err;
2330 struct ntfs_inode *ni = sbi->objid.ni;
2331 struct ntfs_index *indx = &sbi->objid.index_o;
2332
2333 if (!ni)
2334 return -EINVAL;
2335
2336 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2337
2338 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2339
2340 mark_inode_dirty(&ni->vfs_inode);
2341 ni_unlock(ni);
2342
2343 return err;
2344}
2345
2346int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2347 const struct MFT_REF *ref)
2348{
2349 int err;
2350 struct ntfs_inode *ni = sbi->reparse.ni;
2351 struct ntfs_index *indx = &sbi->reparse.index_r;
2352 struct NTFS_DE_R re;
2353
2354 if (!ni)
2355 return -EINVAL;
2356
2357 memset(&re, 0, sizeof(re));
2358
2359 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2360 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2361 re.de.key_size = cpu_to_le16(sizeof(re.key));
2362
2363 re.key.ReparseTag = rtag;
2364 memcpy(&re.key.ref, ref, sizeof(*ref));
2365
2366 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2367
Konstantin Komarov78ab59f2021-08-31 18:52:39 +03002368 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
Konstantin Komarov82cae262021-08-13 17:21:29 +03002369
2370 mark_inode_dirty(&ni->vfs_inode);
2371 ni_unlock(ni);
2372
2373 return err;
2374}
2375
2376int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2377 const struct MFT_REF *ref)
2378{
2379 int err, diff;
2380 struct ntfs_inode *ni = sbi->reparse.ni;
2381 struct ntfs_index *indx = &sbi->reparse.index_r;
2382 struct ntfs_fnd *fnd = NULL;
2383 struct REPARSE_KEY rkey;
2384 struct NTFS_DE_R *re;
2385 struct INDEX_ROOT *root_r;
2386
2387 if (!ni)
2388 return -EINVAL;
2389
2390 rkey.ReparseTag = rtag;
2391 rkey.ref = *ref;
2392
2393 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2394
2395 if (rtag) {
2396 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2397 goto out1;
2398 }
2399
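	/*
	 * rtag == 0: the caller does not know the reparse tag. Find the
	 * entry by MFT reference alone, recover the full key from it,
	 * then delete by that key.
	 */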
2400 fnd = fnd_get();
2401 if (!fnd) {
2402 err = -ENOMEM;
2403 goto out1;
2404 }
2405
2406 root_r = indx_get_root(indx, ni, NULL, NULL);
2407 if (!root_r) {
2408 err = -EINVAL;
2409 goto out;
2410 }
2411
Kari Argillandere8b8e972021-08-03 14:57:09 +03002412	/* Passing 1 forces the key comparison to ignore rkey.ReparseTag. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002413 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2414 (struct NTFS_DE **)&re, fnd);
2415 if (err)
2416 goto out;
2417
2418 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
Kari Argillandere8b8e972021-08-03 14:57:09 +03002419		/* Should be impossible; the volume may be corrupt. */
Konstantin Komarov82cae262021-08-13 17:21:29 +03002420 goto out;
2421 }
2422
2423 memcpy(&rkey, &re->key, sizeof(rkey));
2424
2425 fnd_put(fnd);
2426 fnd = NULL;
2427
2428 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2429 if (err)
2430 goto out;
2431
2432out:
2433 fnd_put(fnd);
2434
2435out1:
2436 mark_inode_dirty(&ni->vfs_inode);
2437 ni_unlock(ni);
2438
2439 return err;
2440}
2441
2442static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2443 CLST len)
2444{
2445 ntfs_unmap_meta(sbi->sb, lcn, len);
2446 ntfs_discard(sbi, lcn, len);
2447}
2448
2449void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2450{
2451 CLST end, i;
2452 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2453
2454 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2455 if (!wnd_is_used(wnd, lcn, len)) {
2456 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2457
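		/*
		 * Part of the range is already free: walk it cluster by
		 * cluster, grouping still-used clusters into runs and
		 * freeing each run individually.
		 */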
2458 end = lcn + len;
2459 len = 0;
2460 for (i = lcn; i < end; i++) {
2461 if (wnd_is_used(wnd, i, 1)) {
2462 if (!len)
2463 lcn = i;
2464 len += 1;
2465 continue;
2466 }
2467
2468 if (!len)
2469 continue;
2470
2471 if (trim)
2472 ntfs_unmap_and_discard(sbi, lcn, len);
2473
2474 wnd_set_free(wnd, lcn, len);
2475 len = 0;
2476 }
2477
2478 if (!len)
2479 goto out;
2480 }
2481
2482 if (trim)
2483 ntfs_unmap_and_discard(sbi, lcn, len);
2484 wnd_set_free(wnd, lcn, len);
2485
2486out:
2487 up_write(&wnd->rw_lock);
2488}
2489
2490/*
Kari Argillandere8b8e972021-08-03 14:57:09 +03002491 * run_deallocate - Deallocate clusters.
Konstantin Komarov82cae262021-08-13 17:21:29 +03002492 */
2493int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2494{
2495 CLST lcn, len;
2496 size_t idx = 0;
2497
2498 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
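		/* Sparse fragments occupy no on-disk clusters; nothing to free. */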
2499 if (lcn == SPARSE_LCN)
2500 continue;
2501
2502 mark_as_free_ex(sbi, lcn, len, trim);
2503 }
2504
2505 return 0;
2506}