// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to tune
 * the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

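/*
 * get_pre_allocated - Round @size up to the current preallocation clump.
 *
 * The clump is a power of two that grows with the file size: 64K for files
 * up to 16M, 64M for files of 16G and above, and an intermediate power of
 * two in between (e.g. a 100M file is rounded up in 64K steps).
 */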
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}

/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
					 enum ATTR_TYPE type)
{
	const struct ATTR_DEF_ENTRY *de;

	switch (type) {
	case ATTR_STD:
	case ATTR_NAME:
	case ATTR_ID:
	case ATTR_LABEL:
	case ATTR_VOL_INFO:
	case ATTR_ROOT:
	case ATTR_EA_INFO:
		return true;
	default:
		de = ntfs_query_def(sbi, type);
		if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
			return true;
		return false;
	}
}

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
		   struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			mark_as_free_ex(sbi, lcn, clen, trim);
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 *
 * A nonzero @fr limits how many new fragments may be added to @run before
 * the function returns with a partial allocation.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	size_t cnt = run->count;

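	/*
	 * Keep allocating until @len clusters are mapped; if the volume
	 * runs out of space, retry once without the preallocation tail
	 * before giving up with -ENOSPC.
	 */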
	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (new_lcn && vcn == vcn0)
			*new_lcn = lcn;

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space'. */
			down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
			wnd_set_free(wnd, lcn, flen);
			up_write(&wnd->rw_lock);
			err = -ENOMEM;
			goto out;
		}

		vcn += flen;

		if (flen >= len || opt == ALLOCATE_MFT ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space'. */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non-empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes nonresident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}

/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *	- Sparse/compressed: No allocated clusters.
 *	- Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *	- No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext;
	u32 align;
	struct MFT_REC *rec;

again:
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err || !attr_b->non_res)
			goto out;

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);

again_1:
	align = sbi->cluster_size;

	if (is_ext) {
		align <<= attr_b->nres.c_unit;
		if (is_attr_sparsed(attr_b))
			keep_prealloc = false;
	}

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && is_ext)
		keep_prealloc = false;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

next_le:
	rec = mi->mrec;

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = true;
			goto ok;
		}

		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocate for sparse/compress. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				CLST new_alen2 = bytes_to_cluster(
					sbi, get_pre_allocated(new_size));
				pre_alloc = new_alen2 - new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : 0, &alen,
				is_mft ? 0
				       : (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
						 3 +
						 1,
				NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto out;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT records' worth, to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto out;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		le_b = NULL;
		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
		attr_b->nres.data_size = attr_b->nres.alloc_size;
		attr_b->nres.valid_size = attr_b->nres.alloc_size;
		mi_b->dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		alen = 0;
		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
					true);
		if (err)
			goto out;

		run_truncate(run, vcn);

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry (vcn==0)
			 * and it is not the first in the entries array
			 * (the list entry for the std attribute is always
			 * first), so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}

		if (is_ext)
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)alen << cluster_bits));

		mi_b->dirty = true;

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto out;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

out:
	if (!err && attr_b && ret)
		*ret = attr_b;

	/* Update inode_set_bytes. */
	if (!err && ((type == ATTR_DATA && !name_len) ||
		     (type == ATTR_ALLOC && name == I30_NAME))) {
		bool dirty = false;

		if (ni->vfs_inode.i_size != new_size) {
			ni->vfs_inode.i_size = new_size;
			dirty = true;
		}

		if (attr_b && attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;
}

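/*
 * attr_data_get_block - Map cluster @vcn of the unnamed DATA attribute.
 *
 * Return the run [*lcn, *len) that contains @vcn. If @new is not NULL,
 * allocate clusters to fill a sparse hole (rounded up to the compression
 * frame for compressed attributes) and set *new when that happens.
 * *lcn is RESIDENT_LCN for resident data.
 */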
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
	u64 total_size;
	u32 clst_per_frame;
	bool ok;

	if (new)
		*new = false;

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);

	if (ok && (*lcn != SPARSE_LCN || !new)) {
		/* Normal way. */
		return 0;
	}

	if (!clen)
		clen = 1;

	if (ok && clen > *len)
		clen = *len;

	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
	if (vcn >= asize) {
		err = -EINVAL;
		goto out;
	}

	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!ok) {
		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
		if (ok && (*lcn != SPARSE_LCN || !new)) {
			/* Normal way. */
			err = 0;
			goto ok;
		}

		if (!ok && !new) {
			*len = 0;
			err = 0;
			goto ok;
		}

		if (ok && clen > *len) {
			clen = *len;
			to_alloc = (clen + clst_per_frame - 1) &
				   ~(clst_per_frame - 1);
		}
	}

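	/*
	 * At this point we must allocate clusters for a sparse hole;
	 * only sparse/compressed (extended) attributes may contain one.
	 */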
	if (!is_attr_ext(attr_b)) {
		err = -EINVAL;
		goto out;
	}

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	err = attr_allocate_clusters(
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
		lcn);
	if (err)
		goto out;
	*new = true;

	end = vcn + *len;

	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;
}

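/*
 * attr_data_read_resident - Copy resident data into @page.
 *
 * Returns E_NTFS_NONRESIDENT if the DATA attribute is nonresident.
 */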
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}

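/*
 * attr_data_write_resident - Copy @page back into resident data.
 *
 * Returns E_NTFS_NONRESIDENT if the DATA attribute is nonresident.
 */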
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}

/*
 * attr_load_runs_range - Load runs for given range [from to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn = from >> cluster_bits;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif

/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters and sparse clusters together
				 * do not add up to a whole frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,
					     &alen, 0, &lcn);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}

	return err;
}
1674
Kari Argillandere8b8e972021-08-03 14:57:09 +03001675/*
1676 * attr_collapse_range - Collapse range in file.
1677 */
Konstantin Komarovbe71b5c2021-08-13 17:21:30 +03001678int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1679{
1680 int err = 0;
1681 struct runs_tree *run = &ni->file.run;
1682 struct ntfs_sb_info *sbi = ni->mi.sbi;
1683 struct ATTRIB *attr = NULL, *attr_b;
1684 struct ATTR_LIST_ENTRY *le, *le_b;
1685 struct mft_inode *mi, *mi_b;
1686 CLST svcn, evcn1, len, dealloc, alen;
1687 CLST vcn, end;
1688 u64 valid_size, data_size, alloc_size, total_size;
1689 u32 mask;
1690 __le16 a_flags;
1691
1692 if (!bytes)
1693 return 0;
1694
1695 le_b = NULL;
1696 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1697 if (!attr_b)
1698 return -ENOENT;
1699
1700 if (!attr_b->non_res) {
1701 /* Attribute is resident. Nothing to do? */
1702 return 0;
1703 }
1704
1705 data_size = le64_to_cpu(attr_b->nres.data_size);
1706 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1707 a_flags = attr_b->flags;
1708
1709 if (is_attr_ext(attr_b)) {
1710 total_size = le64_to_cpu(attr_b->nres.total_size);
1711 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1712 } else {
1713 total_size = alloc_size;
1714 mask = sbi->cluster_mask;
1715 }
1716
1717 if ((vbo & mask) || (bytes & mask)) {
Kari Argillandere8b8e972021-08-03 14:57:09 +03001718 /* Allow to collapse only cluster aligned ranges. */
Konstantin Komarovbe71b5c2021-08-13 17:21:30 +03001719 return -EINVAL;
1720 }
1721
1722 if (vbo > data_size)
1723 return -EINVAL;
1724
1725 down_write(&ni->file.run_lock);
1726
1727 if (vbo + bytes >= data_size) {
1728 u64 new_valid = min(ni->i_valid, vbo);
1729
Kari Argillandere8b8e972021-08-03 14:57:09 +03001730 /* Simple truncate file at 'vbo'. */
Konstantin Komarovbe71b5c2021-08-13 17:21:30 +03001731 truncate_setsize(&ni->vfs_inode, vbo);
1732 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1733 &new_valid, true, NULL);
1734
1735 if (!err && new_valid < ni->i_valid)
1736 ni->i_valid = new_valid;
1737
1738 goto out;
1739 }
1740
1741 /*
Kari Argillandere8b8e972021-08-03 14:57:09 +03001742 * Enumerate all attribute segments and collapse.
Konstantin Komarovbe71b5c2021-08-13 17:21:30 +03001743 */
1744 alen = alloc_size >> sbi->cluster_bits;
1745 vcn = vbo >> sbi->cluster_bits;
1746 len = bytes >> sbi->cluster_bits;
1747 end = vcn + len;
1748 dealloc = 0;
1749
1750 svcn = le64_to_cpu(attr_b->nres.svcn);
1751 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1752
1753 if (svcn <= vcn && vcn < evcn1) {
1754 attr = attr_b;
1755 le = le_b;
1756 mi = mi_b;
1757 } else if (!le_b) {
1758 err = -EINVAL;
1759 goto out;
1760 } else {
1761 le = le_b;
1762 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1763 &mi);
1764 if (!attr) {
1765 err = -EINVAL;
1766 goto out;
1767 }
1768
1769 svcn = le64_to_cpu(attr->nres.svcn);
1770 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1771 }
1772
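	/*
	 * Three cases per segment: entirely after the hole - shift VCNs
	 * down by @len; overlapping the hole - deallocate and collapse
	 * the overlap; entirely inside the hole - delete the segment.
	 */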
	for (;;) {
		if (svcn >= end) {
			/* Shift VCN. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
				le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
						&next_svcn);
				if (!le) {
					err = -EINVAL;
					goto out;
				}
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}

/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, dealloc;
	u64 total_size, alloc_size;
	u32 mask;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Use attr_b here: attr is still NULL at this point. */
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, dealloc2;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto out;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		dealloc2 = dealloc;
		err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
		if (err)
			goto out;

		if (dealloc2 == dealloc) {
			/* Looks like the required range is already sparsed. */
		} else {
			if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
					   false)) {
				err = -ENOMEM;
				goto out;
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
			if (err)
				goto out;
		}
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	total_size -= (u64)dealloc << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}