// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

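/*
 * compare_attr - Compare an existing attribute with (type, name).
 *
 * Used by mi_insert_attr() below to keep attributes ordered by type and name.
 */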
static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
                               const __le16 *name, u8 name_len,
                               const u16 *upcase)
{
        /* First, compare the type codes. */
        int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

        if (diff)
                return diff;

        /* They have the same type code, so we have to compare the names. */
        return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
                              upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
        u16 free_id, max_id, t16;
        struct MFT_REC *rec = mi->mrec;
        struct ATTRIB *attr;
        __le16 id;

        id = rec->next_attr_id;
        free_id = le16_to_cpu(id);
        if (free_id < 0x7FFF) {
                rec->next_attr_id = cpu_to_le16(free_id + 1);
                return id;
        }

        /* One record can store up to 1024/24 ~= 42 attributes. */
        free_id = 0;
        max_id = 0;

        attr = NULL;

        for (;;) {
                attr = mi_enum_attr(mi, attr);
                if (!attr) {
                        rec->next_attr_id = cpu_to_le16(max_id + 1);
                        mi->dirty = true;
                        return cpu_to_le16(free_id);
                }

                t16 = le16_to_cpu(attr->id);
                if (t16 == free_id) {
                        free_id += 1;
                        attr = NULL;
                } else if (max_id < t16)
                        max_id = t16;
        }
}

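/*
 * mi_get - Allocate and read MFT record number rno.
 *
 * On success the caller owns the returned mft_inode and releases it
 * with mi_put().
 */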
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
        int err;
        struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

        if (!m)
                return -ENOMEM;

        err = mi_init(m, sbi, rno);
        if (err) {
                kfree(m);
                return err;
        }

        err = mi_read(m, false);
        if (err) {
                mi_put(m);
                return err;
        }

        *mi = m;
        return 0;
}

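/*
 * mi_put - Release the buffers of a record and free the mft_inode itself.
 */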
void mi_put(struct mft_inode *mi)
{
        mi_clear(mi);
        kfree(mi);
}

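/*
 * mi_init - Bind mft_inode to sbi/rno and allocate a buffer for the record.
 */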
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
        mi->sbi = sbi;
        mi->rno = rno;
        mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
        if (!mi->mrec)
                return -ENOMEM;

        return 0;
}

/*
 * mi_read - Read an MFT record from disk.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
        int err;
        struct MFT_REC *rec = mi->mrec;
        struct ntfs_sb_info *sbi = mi->sbi;
        u32 bpr = sbi->record_size;
        u64 vbo = (u64)mi->rno << sbi->record_bits;
        struct ntfs_inode *mft_ni = sbi->mft.ni;
        struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
        struct rw_semaphore *rw_lock = NULL;

        if (is_mounted(sbi)) {
                if (!is_mft) {
                        rw_lock = &mft_ni->file.run_lock;
                        down_read(rw_lock);
                }
        }

        err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
        if (rw_lock)
                up_read(rw_lock);
        if (!err)
                goto ok;

        if (err == -E_NTFS_FIXUP) {
                mi->dirty = true;
                goto ok;
        }

        if (err != -ENOENT)
                goto out;

        if (rw_lock) {
                ni_lock(mft_ni);
                down_write(rw_lock);
        }
        err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
                                 vbo >> sbi->cluster_bits);
        if (rw_lock) {
                up_write(rw_lock);
                ni_unlock(mft_ni);
        }
        if (err)
                goto out;

        if (rw_lock)
                down_read(rw_lock);
        err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
        if (rw_lock)
                up_read(rw_lock);

        if (err == -E_NTFS_FIXUP) {
                mi->dirty = true;
                goto ok;
        }
        if (err)
                goto out;

ok:
        /* Check field 'total' only here. */
        if (le32_to_cpu(rec->total) != bpr) {
                err = -EINVAL;
                goto out;
        }

        return 0;

out:
        return err;
}

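/*
 * mi_enum_attr - Enumerate the attributes of an MFT record.
 *
 * Return: The attribute that follows attr (the first one if attr is NULL),
 * or NULL at the end of the list or if the record looks corrupted.
 */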
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
        const struct MFT_REC *rec = mi->mrec;
        u32 used = le32_to_cpu(rec->used);
        u32 t32, off, asize;
        u16 t16;

        if (!attr) {
                u32 total = le32_to_cpu(rec->total);

                off = le16_to_cpu(rec->attr_off);

                if (used > total)
                        return NULL;

                if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
                    !IS_ALIGNED(off, 4)) {
                        return NULL;
                }

                /* Stop if the record is not in use. */
                if (!is_rec_inuse(rec))
                        return NULL;

                attr = Add2Ptr(rec, off);
        } else {
                /* Check that the input attr is inside the record. */
                off = PtrOffset(rec, attr);
                if (off >= used)
                        return NULL;

                asize = le32_to_cpu(attr->size);
                if (asize < SIZEOF_RESIDENT) {
                        /* Impossible, because we should never return such an attribute. */
                        return NULL;
                }

                attr = Add2Ptr(attr, asize);
                off += asize;
        }

        asize = le32_to_cpu(attr->size);

        /* Can we use the first field (attr->type)? */
        if (off + 8 > used) {
                static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
                return NULL;
        }

        if (attr->type == ATTR_END) {
                /* End of enumeration. */
                return NULL;
        }

        /* 0x100 is the last known attribute type for now. */
        t32 = le32_to_cpu(attr->type);
        if ((t32 & 0xf) || (t32 > 0x100))
                return NULL;

        /* Check boundary. */
        if (off + asize > used)
                return NULL;

        /* Check the size of the attribute. */
        if (!attr->non_res) {
                if (asize < SIZEOF_RESIDENT)
                        return NULL;

                t16 = le16_to_cpu(attr->res.data_off);

                if (t16 > asize)
                        return NULL;

                t32 = le32_to_cpu(attr->res.data_size);
                if (t16 + t32 > asize)
                        return NULL;

                return attr;
        }

        /* Check some nonresident fields. */
        if (attr->name_len &&
            le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
                    le16_to_cpu(attr->nres.run_off)) {
                return NULL;
        }

        if (attr->nres.svcn || !is_attr_ext(attr)) {
                if (asize + 8 < SIZEOF_NONRESIDENT)
                        return NULL;

                if (attr->nres.c_unit)
                        return NULL;
        } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
                return NULL;

        return attr;
}

/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
                            enum ATTR_TYPE type, const __le16 *name,
                            size_t name_len, const __le16 *id)
{
        u32 type_in = le32_to_cpu(type);
        u32 atype;

next_attr:
        attr = mi_enum_attr(mi, attr);
        if (!attr)
                return NULL;

        atype = le32_to_cpu(attr->type);
        if (atype > type_in)
                return NULL;

        if (atype < type_in)
                goto next_attr;

        if (attr->name_len != name_len)
                goto next_attr;

        if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
                goto next_attr;

        if (id && *id != attr->id)
                goto next_attr;

        return attr;
}

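/*
 * mi_write - Write a dirty MFT record back to disk.
 */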
int mi_write(struct mft_inode *mi, int wait)
{
        struct MFT_REC *rec;
        int err;
        struct ntfs_sb_info *sbi;

        if (!mi->dirty)
                return 0;

        sbi = mi->sbi;
        rec = mi->mrec;

        err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
        if (err)
                return err;

        if (mi->rno < sbi->mft.recs_mirr)
                sbi->flags |= NTFS_FLAGS_MFTMIRR;

        mi->dirty = false;

        return 0;
}

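/*
 * mi_format_new - Initialize a new MFT record from the template sbi->new_rec.
 *
 * Picks a suitable sequence number, marks the record in use
 * and maps the buffers that back it if they are not mapped yet.
 */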
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
                  __le16 flags, bool is_mft)
{
        int err;
        u16 seq = 1;
        struct MFT_REC *rec;
        u64 vbo = (u64)rno << sbi->record_bits;

        err = mi_init(mi, sbi, rno);
        if (err)
                return err;

        rec = mi->mrec;

        if (rno == MFT_REC_MFT) {
                ;
        } else if (rno < MFT_REC_FREE) {
                seq = rno;
        } else if (rno >= sbi->mft.used) {
                ;
        } else if (mi_read(mi, is_mft)) {
                ;
        } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
                /* Record is reused. Update its sequence number. */
                seq = le16_to_cpu(rec->seq) + 1;
                if (!seq)
                        seq = 1;
        }

        memcpy(rec, sbi->new_rec, sbi->record_size);

        rec->seq = cpu_to_le16(seq);
        rec->flags = RECORD_FLAG_IN_USE | flags;

        mi->dirty = true;

        if (!mi->nb.nbufs) {
                struct ntfs_inode *ni = sbi->mft.ni;
                bool lock = false;

                if (is_mounted(sbi) && !is_mft) {
                        down_read(&ni->file.run_lock);
                        lock = true;
                }

                err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
                                  &mi->nb);
                if (lock)
                        up_read(&ni->file.run_lock);
        }

        return err;
}

/*
 * mi_mark_free - Mark the record as unused and mark it as free in the bitmap.
 */
void mi_mark_free(struct mft_inode *mi)
{
        CLST rno = mi->rno;
        struct ntfs_sb_info *sbi = mi->sbi;

        if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
                ntfs_clear_mft_tail(sbi, rno, rno + 1);
                mi->dirty = false;
                return;
        }

        if (mi->mrec) {
                clear_rec_inuse(mi->mrec);
                mi->dirty = true;
                mi_write(mi, 0);
        }
        ntfs_mark_rec_free(sbi, rno);
}

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute, or NULL if it cannot be created.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
                              const __le16 *name, u8 name_len, u32 asize,
                              u16 name_off)
{
        size_t tail;
        struct ATTRIB *attr;
        __le16 id;
        struct MFT_REC *rec = mi->mrec;
        struct ntfs_sb_info *sbi = mi->sbi;
        u32 used = le32_to_cpu(rec->used);
        const u16 *upcase = sbi->upcase;
        int diff;

        /* Can we insert this attribute? */
        if (used + asize > mi->sbi->record_size)
                return NULL;

        /*
         * Scan through the list of attributes to find the point
         * at which we should insert it.
         */
        attr = NULL;
        while ((attr = mi_enum_attr(mi, attr))) {
                diff = compare_attr(attr, type, name, name_len, upcase);
                if (diff > 0)
                        break;
                if (diff < 0)
                        continue;

                if (!is_attr_indexed(attr))
                        return NULL;
                break;
        }

        if (!attr) {
                tail = 8; /* Not used, just to suppress warning. */
                attr = Add2Ptr(rec, used - 8);
        } else {
                tail = used - PtrOffset(rec, attr);
        }

        id = mi_new_attt_id(mi);

        memmove(Add2Ptr(attr, asize), attr, tail);
        memset(attr, 0, asize);

        attr->type = type;
        attr->size = cpu_to_le32(asize);
        attr->name_len = name_len;
        attr->name_off = cpu_to_le16(name_off);
        attr->id = id;

        memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
        rec->used = cpu_to_le32(used + asize);

        mi->dirty = true;

        return attr;
}

/*
 * mi_remove_attr - Remove the attribute from the record.
 *
 * NOTE: The source attr will point to the next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
                    struct ATTRIB *attr)
{
        struct MFT_REC *rec = mi->mrec;
        u32 aoff = PtrOffset(rec, attr);
        u32 used = le32_to_cpu(rec->used);
        u32 asize = le32_to_cpu(attr->size);

        if (aoff + asize > used)
                return false;

        if (ni && is_attr_indexed(attr)) {
                le16_add_cpu(&ni->mi.mrec->hard_links, -1);
                ni->mi.dirty = true;
        }

        used -= asize;
        memmove(attr, Add2Ptr(attr, asize), used - aoff);
        rec->used = cpu_to_le32(used);
        mi->dirty = true;

        return true;
}

/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
        struct MFT_REC *rec = mi->mrec;
        u32 aoff = PtrOffset(rec, attr);
        u32 total, used = le32_to_cpu(rec->used);
        u32 nsize, asize = le32_to_cpu(attr->size);
        u32 rsize = le32_to_cpu(attr->res.data_size);
        int tail = (int)(used - aoff - asize);
        int dsize;
        char *next;

        if (tail < 0 || aoff >= used)
                return false;

        if (!bytes)
                return true;

        total = le32_to_cpu(rec->total);
        next = Add2Ptr(attr, asize);

        if (bytes > 0) {
                dsize = ALIGN(bytes, 8);
                if (used + dsize > total)
                        return false;
                nsize = asize + dsize;
                /* Move tail. */
                memmove(next + dsize, next, tail);
                memset(next, 0, dsize);
                used += dsize;
                rsize += dsize;
        } else {
                dsize = ALIGN(-bytes, 8);
                if (dsize > asize)
                        return false;
                nsize = asize - dsize;
                memmove(next - dsize, next, tail);
                used -= dsize;
                rsize -= dsize;
        }

        rec->used = cpu_to_le32(used);
        attr->size = cpu_to_le32(nsize);
        if (!attr->non_res)
                attr->res.data_size = cpu_to_le32(rsize);
        mi->dirty = true;

        return true;
}

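/*
 * mi_pack_runs - Pack runs of 'run' into the non-resident attribute 'attr'.
 *
 * Uses all free space of the record as a temporary gap, then shrinks the
 * attribute back to the packed size and updates 'evcn' and 'used'.
 */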
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
                 struct runs_tree *run, CLST len)
{
        int err = 0;
        struct ntfs_sb_info *sbi = mi->sbi;
        u32 new_run_size;
        CLST plen;
        struct MFT_REC *rec = mi->mrec;
        CLST svcn = le64_to_cpu(attr->nres.svcn);
        u32 used = le32_to_cpu(rec->used);
        u32 aoff = PtrOffset(rec, attr);
        u32 asize = le32_to_cpu(attr->size);
        char *next = Add2Ptr(attr, asize);
        u16 run_off = le16_to_cpu(attr->nres.run_off);
        u32 run_size = asize - run_off;
        u32 tail = used - aoff - asize;
        u32 dsize = sbi->record_size - used;

        /* Make a maximum gap in current record. */
        memmove(next + dsize, next, tail);

        /* Pack as much as possible. */
        err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
                       &plen);
        if (err < 0) {
                memmove(next, next + dsize, tail);
                return err;
        }

        new_run_size = ALIGN(err, 8);

        memmove(next + new_run_size - run_size, next + dsize, tail);

        attr->size = cpu_to_le32(asize + new_run_size - run_size);
        attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
        rec->used = cpu_to_le32(used + new_run_size - run_size);
        mi->dirty = true;

        return 0;
}