blob: 8ea680dba61e39c4a41b821ae7b048c6a716ed98 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements UBIFS journal.
 *
 * The journal consists of 2 parts - the log and bud LEBs. The log has fixed
 * length and position, while a bud logical eraseblock is any LEB in the main
 * area. Buds contain file system data - data nodes, inode nodes, etc. The log
 * contains only references to buds and some other stuff like commit
 * start node. The idea is that when we commit the journal, we do
 * not copy the data, the buds just become indexed. Since after the commit the
 * nodes in bud eraseblocks become leaf nodes of the file system index tree, we
 * use term "bud". Analogy is obvious, bud eraseblocks contain nodes which will
 * become leafs in the future.
 *
 * The journal is multi-headed because we want to write data to the journal as
 * optimally as possible. It is nice to have nodes belonging to the same inode
 * in one LEB, so we may write data owned by different inodes to different
 * journal heads, although at present only one data head is used.
 *
 * For recovery reasons, the base head contains all inode nodes, all directory
 * entry nodes and all truncate nodes. This means that the other heads contain
 * only data nodes.
 *
 * Bud LEBs may be half-indexed. For example, if the bud was not full at the
 * time of commit, the bud is retained to continue to be used in the journal,
 * even though the "front" of the LEB is now indexed. In that case, the log
 * reference contains the offset where the bud starts for the purposes of the
 * journal.
 *
 * The journal size has to be limited, because the larger is the journal, the
 * longer it takes to mount UBIFS (scanning the journal) and the more memory it
 * takes (indexing in the TNC).
 *
 * All the journal write operations like 'ubifs_jnl_update()' here, which write
 * multiple UBIFS nodes to the journal at one go, are atomic with respect to
 * unclean reboots. Should the unclean reboot happen, the recovery code drops
 * all the nodes.
 */

#include "ubifs.h"
50
51/**
52 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
53 * @ino: the inode to zero out
54 */
55static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
56{
57 memset(ino->padding1, 0, 4);
58 memset(ino->padding2, 0, 26);
59}
60
61/**
62 * zero_dent_node_unused - zero out unused fields of an on-flash directory
63 * entry node.
64 * @dent: the directory entry to zero out
65 */
66static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
67{
68 dent->padding1 = 0;
Artem Bityutskiy1e517642008-07-14 19:08:37 +030069}
70
71/**
Artem Bityutskiy1e517642008-07-14 19:08:37 +030072 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
73 * node.
74 * @trun: the truncation node to zero out
75 */
76static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
77{
78 memset(trun->padding, 0, 12);
79}
80
/**
 * ubifs_add_auth_dirt - account the space of an authentication node as dirty.
 * @c: UBIFS file-system description object
 * @lnum: LEB the authentication node was written to
 *
 * When authentication is disabled no auth node exists, so there is nothing
 * to account.
 */
static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
{
	if (!ubifs_authenticated(c))
		return;

	ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
}
86
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves space in journal head @head. If the reservation
 * succeeded, the journal head stays locked and later has to be unlocked using
 * 'release_head()'. Returns zero in case of success, %-EAGAIN if commit has to
 * be done, and other negative error codes in case of other failures.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Fast path: the current bud already has enough room */
	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		return 0;

	/*
	 * Write buffer wasn't seek'ed or there is no enough space - look for an
	 * LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run garbage collector to make
	 * some. But the write-buffer mutex has to be unlocked because
	 * GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may
		 * have allocated new bud for this journal head,
		 * because we dropped @wbuf->io_mutex, so try once
		 * again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	/* GC gave us a LEB; re-take the head lock and re-check for races */
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum). And the effect would be that the recovery would see
	 * that there is corruption in the next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(c, err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Return original error code only if it is not %-EAGAIN,
		 * which is not really an error. Otherwise, return the error
		 * code of 'ubifs_return_leb()'.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
224
/**
 * ubifs_hash_nodes - hash a run of nodes and fill in the authentication node.
 * @c: UBIFS file-system description object
 * @node: buffer containing one or more prepared UBIFS nodes, followed by
 *	  space reserved for the authentication node
 * @len: total buffer length, including the reserved auth node space
 * @hash: hash to update with each node's contents
 *
 * Walks the 8-byte-aligned nodes in @node, feeding each into @hash, until
 * only the reserved authentication-node space remains, then writes the
 * authentication node there. Returns zero on success, a negative error code
 * otherwise.
 */
static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
			     int len, struct shash_desc *hash)
{
	int auth_node_size = ubifs_auth_node_sz(c);
	int err;

	while (1) {
		const struct ubifs_ch *ch = node;
		int nodelen = le32_to_cpu(ch->len);

		ubifs_assert(c, len >= auth_node_size);

		/* Only the reserved auth-node space is left - stop hashing */
		if (len == auth_node_size)
			break;

		ubifs_assert(c, len > nodelen);
		ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));

		err = ubifs_shash_update(c, hash, (void *)node, nodelen);
		if (err)
			return err;

		/* Nodes are padded to 8-byte boundaries in the buffer */
		node += ALIGN(nodelen, 8);
		len -= ALIGN(nodelen, 8);
	}

	return ubifs_prepare_auth_node(c, node, hash);
}
253
/**
 * write_head - write data to a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @buf: buffer to write
 * @len: length to write
 * @lnum: LEB number written is returned here
 * @offs: offset written is returned here
 * @sync: non-zero if the write-buffer has to by synchronized
 *
 * This function writes data to the reserved space of journal head @jhead.
 * The head must already be locked by a successful 'make_reservation()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
		      int *lnum, int *offs, int sync)
{
	int err;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/* The GC head is written by the garbage collector, never here */
	ubifs_assert(c, jhead != GCHD);

	/* Report the position the data will land at before writing it */
	*lnum = c->jheads[jhead].wbuf.lnum;
	*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
	dbg_jnl("jhead %s, LEB %d:%d, len %d",
		dbg_jhead(jhead), *lnum, *offs, len);

	if (ubifs_authenticated(c)) {
		err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
		if (err)
			return err;
	}

	err = ubifs_wbuf_write_nolock(wbuf, buf, len);
	if (err)
		return err;
	if (sync)
		err = ubifs_wbuf_sync_nolock(wbuf);
	return err;
}
294
/**
 * make_reservation - reserve journal space.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 * @len: how many bytes to reserve
 *
 * This function makes space reservation in journal head @jhead. The function
 * takes the commit lock and locks the journal head, and the caller has to
 * unlock the head and finish the reservation with 'finish_reservation()'.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, the journal head may be unlocked as soon as the data is written, while
 * the commit lock has to be released after the data has been added to the
 * TNC.
 */
static int make_reservation(struct ubifs_info *c, int jhead, int len)
{
	int err, cmt_retries = 0, nospc_retries = 0;

again:
	down_read(&c->commit_sem);
	err = reserve_space(c, jhead, len);
	if (!err)
		/* c->commit_sem will get released via finish_reservation(). */
		return 0;
	up_read(&c->commit_sem);

	if (err == -ENOSPC) {
		/*
		 * GC could not make any progress. We should try to commit
		 * once because it could make some dirty space and GC would
		 * make progress, so make the error -EAGAIN so that the below
		 * will commit and re-try.
		 */
		if (nospc_retries++ < 2) {
			dbg_jnl("no space, retry");
			err = -EAGAIN;
		}

		/*
		 * This means that the budgeting is incorrect. We always have
		 * to be able to write to the media, because all operations are
		 * budgeted. Deletions are not budgeted, though, but we reserve
		 * an extra LEB for them.
		 */
	}

	if (err != -EAGAIN)
		goto out;

	/*
	 * -EAGAIN means that the journal is full or too large, or the above
	 * code wants to do one commit. Do this and re-try.
	 */
	if (cmt_retries > 128) {
		/*
		 * This should not happen unless the journal size limitations
		 * are too tough.
		 */
		ubifs_err(c, "stuck in space allocation");
		err = -ENOSPC;
		goto out;
	} else if (cmt_retries > 32)
		ubifs_warn(c, "too many space allocation re-tries (%d)",
			   cmt_retries);

	dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
		cmt_retries);
	cmt_retries += 1;

	err = ubifs_run_commit(c);
	if (err)
		return err;
	goto again;

out:
	ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d",
		  len, jhead, err);
	if (err == -ENOSPC) {
		/* These are some budgeting problems, print useful information */
		down_write(&c->commit_sem);
		dump_stack();
		ubifs_dump_budg(c, &c->bi);
		ubifs_dump_lprops(c);
		cmt_retries = dbg_check_lprops(c);
		up_write(&c->commit_sem);
	}
	return err;
}
385
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases journal head @jhead which was locked by
 * the 'make_reservation()' function. It has to be called after each successful
 * 'make_reservation()' invocation. Note, this drops only the head's I/O
 * mutex; the commit lock taken by 'make_reservation()' is released
 * separately by 'finish_reservation()'.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
399
/**
 * finish_reservation - finish a reservation.
 * @c: UBIFS file-system description object
 *
 * This function finishes journal space reservation. It must be called after
 * 'make_reservation()'. It releases the commit lock which
 * 'make_reservation()' acquired with 'down_read()'.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
411
412/**
413 * get_dent_type - translate VFS inode mode to UBIFS directory entry type.
414 * @mode: inode mode
415 */
416static int get_dent_type(int mode)
417{
418 switch (mode & S_IFMT) {
419 case S_IFREG:
420 return UBIFS_ITYPE_REG;
421 case S_IFDIR:
422 return UBIFS_ITYPE_DIR;
423 case S_IFLNK:
424 return UBIFS_ITYPE_LNK;
425 case S_IFBLK:
426 return UBIFS_ITYPE_BLK;
427 case S_IFCHR:
428 return UBIFS_ITYPE_CHR;
429 case S_IFIFO:
430 return UBIFS_ITYPE_FIFO;
431 case S_IFSOCK:
432 return UBIFS_ITYPE_SOCK;
433 default:
434 BUG();
435 }
436 return 0;
437}
438
/**
 * pack_inode - pack an inode node.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 *
 * Fills @ino with the on-flash (little-endian) representation of @inode and
 * prepares it as a group node. For a deletion inode (nlink == 0) the attached
 * inline data is omitted.
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
485
/**
 * mark_inode_clean - mark UBIFS inode as clean.
 * @c: UBIFS file-system description object
 * @ui: UBIFS inode to mark as clean
 *
 * This helper function marks UBIFS inode @ui as clean by cleaning the
 * @ui->dirty flag and releasing its budget. Note, VFS may still treat the
 * inode as dirty and try to write it back, but 'ubifs_write_inode()' would
 * just do nothing.
 */
static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
{
	/* Release the budget only if one was actually charged (inode dirty) */
	if (ui->dirty)
		ubifs_release_dirty_inode_budget(c, ui);
	ui->dirty = 0;
}
502
Richard Weinbergerd63d61c2016-10-19 15:59:12 +0200503static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
504{
505 if (c->double_hash)
Ben Dooks (Codethink)3cfa4412019-10-16 11:04:09 +0100506 dent->cookie = (__force __le32) prandom_u32();
Richard Weinbergerd63d61c2016-10-19 15:59:12 +0200507 else
508 dent->cookie = 0;
509}
510
/**
 * ubifs_jnl_update - update inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: indicates a directory entry deletion i.e unlink or rmdir
 * @xent: non-zero if the directory entry is an extended attribute entry
 *
 * This function updates an inode by writing a directory entry (or extended
 * attribute entry), the inode itself, and the parent directory inode (or the
 * host inode) to the journal.
 *
 * The function writes the host inode @dir last, which is important in case of
 * extended attributes. Indeed, then we guarantee that if the host inode gets
 * synchronized (with 'fsync()'), and the write-buffer it sits in gets flushed,
 * the extended attribute inode gets flushed too. And this is exactly what the
 * user expects - synchronizing the host inode synchronizes its extended
 * attributes. Similarly, this guarantees that if @dir is synchronized, its
 * directory entry corresponding to @nm gets synchronized too.
 *
 * If the inode (@inode) or the parent directory (@dir) are synchronous, this
 * function synchronizes the write-buffer.
 *
 * This function marks the @dir and @inode inodes as clean and returns zero on
 * success. In case of failure, a negative error code is returned.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct fscrypt_name *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *host_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;
	u8 hash_dent[UBIFS_HASH_ARR_SZ];
	u8 hash_ino[UBIFS_HASH_ARR_SZ];
	u8 hash_ino_host[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	/* Entry node is followed by the name and its terminating '\0' */
	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted anyway.
	 * And if the inode is being deleted, no need to synchronize
	 * write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);

	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	/* Make sure to also account for extended attributes */
	if (ubifs_authenticated(c))
		len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c);
	else
		len += host_ui->data_len;

	/* One buffer holds the whole node group: dent + inode + host inode */
	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		if (fname_name(nm) == NULL)
			/* Encrypted name unavailable - key by hash only */
			dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
		else
			dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(dent->name, fname_name(nm), fname_len(nm));
	dent->name[fname_len(nm)] = '\0';
	set_dent_cookie(c, dent);

	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);
	err = ubifs_node_calc_hash(c, dent, hash_dent);
	if (err)
		goto out_release;

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	err = ubifs_node_calc_hash(c, ino, hash_ino);
	if (err)
		goto out_release;

	/* Host/parent inode is packed last - see the function comment */
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);
	err = ubifs_node_calc_hash(c, ino, hash_ino_host);
	if (err)
		goto out_release;

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
		orphan_added = 1;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);
	ubifs_add_auth_dirt(c, lnum);

	if (deletion) {
		if (fname_name(nm) == NULL)
			err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
		else
			err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen,
				       hash_dent, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last reference
	 * to it has just been deleted, because the inode may still be opened.
	 * Instead, the inode has been added to orphan lists and the orphan
	 * subsystem will take further care about it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
			    UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	if (xent) {
		spin_lock(&host_ui->ui_lock);
		host_ui->synced_i_size = host_ui->ui_size;
		spin_unlock(&host_ui->ui_lock);
	}
	mark_inode_clean(c, ui);
	mark_inode_clean(c, host_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (orphan_added)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
711
/**
 * ubifs_jnl_write_data - write a data node to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode the data node belongs to
 * @key: node key
 * @buf: buffer to write
 * @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
 *
 * This function writes a data node to the journal. Returns %0 if the data node
 * was successfully written, and a negative error code in case of failure.
 */
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
			 const union ubifs_key *key, const void *buf, int len)
{
	struct ubifs_data_node *data;
	int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
	int write_len;
	struct ubifs_inode *ui = ubifs_inode(inode);
	bool encrypted = IS_ENCRYPTED(inode);
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
		(unsigned long)key_inum(c, key), key_block(c, key), len);
	ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);

	/* Encryption may pad the data up to a cipher block */
	if (encrypted)
		dlen += UBIFS_CIPHER_BLOCK_SIZE;

	auth_len = ubifs_auth_node_sz(c);

	data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN);
	if (!data) {
		/*
		 * Fall-back to the write reserve buffer. Note, we might be
		 * currently on the memory reclaim path, when the kernel is
		 * trying to free some memory by writing out dirty pages. The
		 * write reserve buffer helps us to guarantee that we are
		 * always able to write the data.
		 */
		allocated = 0;
		mutex_lock(&c->write_reserve_mutex);
		data = c->write_reserve_buf;
	}

	data->ch.node_type = UBIFS_DATA_NODE;
	key_write(c, key, &data->key);
	data->size = cpu_to_le32(len);

	if (!(ui->flags & UBIFS_COMPR_FL))
		/* Compression is disabled for this inode */
		compr_type = UBIFS_COMPR_NONE;
	else
		compr_type = ui->compr_type;

	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
	ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);

	if (encrypted) {
		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
		if (err)
			goto out_free;

	} else {
		data->compr_size = 0;
		out_len = compr_len;
	}

	/* Recompute the node length now that the payload size is final */
	dlen = UBIFS_DATA_NODE_SZ + out_len;
	if (ubifs_authenticated(c))
		write_len = ALIGN(dlen, 8) + auth_len;
	else
		write_len = dlen;

	data->compr_type = cpu_to_le16(compr_type);

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, DATAHD, write_len);
	if (err)
		goto out_free;

	ubifs_prepare_node(c, data, dlen, 0);
	err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
	if (err)
		goto out_release;

	err = ubifs_node_calc_hash(c, data, hash);
	if (err)
		goto out_release;

	ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
	release_head(c, DATAHD);

	ubifs_add_auth_dirt(c, lnum);

	err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return 0;

out_release:
	release_head(c, DATAHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	if (!allocated)
		mutex_unlock(&c->write_reserve_mutex);
	else
		kfree(data);
	return err;
}
831
/**
 * ubifs_jnl_write_inode - flush inode to the journal.
 * @c: UBIFS file-system description object
 * @inode: inode to flush
 *
 * This function writes inode @inode to the journal. If the inode is
 * synchronous, it also synchronizes the write-buffer. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err, lnum, offs;
	struct ubifs_ino_node *ino, *ino_start;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int sync = 0, write_len = 0, ilen = UBIFS_INO_NODE_SZ;
	int last_reference = !inode->i_nlink;
	/*
	 * When the last link to an inode hosting extended attributes goes
	 * away, a deletion inode is written for every xattr inode as well.
	 */
	int kill_xattrs = ui->xattr_cnt && last_reference;
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);

	/*
	 * If the inode is being deleted, do not write the attached data. No
	 * need to synchronize the write-buffer either.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync = IS_SYNC(inode);
	} else if (kill_xattrs) {
		/* One deletion inode node per extended attribute */
		write_len += UBIFS_INO_NODE_SZ * ui->xattr_cnt;
	}

	/* Authenticated mounts append an authentication node to the group */
	if (ubifs_authenticated(c))
		write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c);
	else
		write_len += ilen;

	/*
	 * @ino walks forward as xattr deletion inodes are packed; @ino_start
	 * keeps the head of the allocation for writing and freeing.
	 */
	ino_start = ino = kmalloc(write_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, write_len);
	if (err)
		goto out_free;

	if (kill_xattrs) {
		union ubifs_key key;
		struct fscrypt_name nm = {0};
		struct inode *xino;
		struct ubifs_dent_node *xent, *pxent = NULL;

		/*
		 * The reservation above was sized from ui->xattr_cnt; refuse
		 * counts beyond what a single journal write can hold.
		 */
		if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
			err = -EPERM;
			ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
			goto out_release;
		}

		/* Walk all xattr entries of this inode via the TNC */
		lowest_xent_key(c, &key, inode->i_ino);
		while (1) {
			xent = ubifs_tnc_next_ent(c, &key, &nm);
			if (IS_ERR(xent)) {
				err = PTR_ERR(xent);
				if (err == -ENOENT)
					/* No more xattr entries */
					break;

				kfree(pxent);
				goto out_release;
			}

			fname_name(&nm) = xent->name;
			fname_len(&nm) = le16_to_cpu(xent->nlen);

			xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
			if (IS_ERR(xino)) {
				err = PTR_ERR(xino);
				ubifs_err(c, "dead directory entry '%s', error %d",
					  xent->name, err);
				ubifs_ro_mode(c, err);
				kfree(pxent);
				kfree(xent);
				goto out_release;
			}
			ubifs_assert(c, ubifs_inode(xino)->xattr);

			/* Pack a deletion inode (nlink 0) for this xattr */
			clear_nlink(xino);
			pack_inode(c, ino, xino, 0);
			ino = (void *)ino + UBIFS_INO_NODE_SZ;
			iput(xino);

			/* Keep previous entry alive while @key still points into it */
			kfree(pxent);
			pxent = xent;
			key_read(c, &xent->key, &key);
		}
		kfree(pxent);
	}

	/* The host inode itself is packed last (last-of-group flag set) */
	pack_inode(c, ino, inode, 1);
	err = ubifs_node_calc_hash(c, ino, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
					  inode->i_ino);
	release_head(c, BASEHD);

	if (last_reference) {
		/*
		 * Deletion: drop all of the inode's keys from the TNC and its
		 * orphan record; what was just written is immediately dirt.
		 */
		err = ubifs_tnc_remove_ino(c, inode->i_ino);
		if (err)
			goto out_ro;
		ubifs_delete_orphan(c, inode->i_ino);
		err = ubifs_add_dirt(c, lnum, write_len);
	} else {
		union ubifs_key key;

		ubifs_add_auth_dirt(c, lnum);

		ino_key_init(c, &key, inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
	}
	if (err)
		goto out_ro;

	finish_reservation(c);
	/* The on-flash inode now reflects the in-memory size */
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	kfree(ino_start);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	/* A failure after write_head() leaves the FS inconsistent - go RO */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino_start);
	return err;
}
975
976/**
Adrian Hunter7d62ff22008-07-23 15:48:39 +0300977 * ubifs_jnl_delete_inode - delete an inode.
Artem Bityutskiyde94eb52008-07-22 13:06:20 +0300978 * @c: UBIFS file-system description object
979 * @inode: inode to delete
980 *
981 * This function deletes inode @inode which includes removing it from orphans,
982 * deleting it from TNC and, in some cases, writing a deletion inode to the
983 * journal.
984 *
985 * When regular file inodes are unlinked or a directory inode is removed, the
Adrian Hunter7d62ff22008-07-23 15:48:39 +0300986 * 'ubifs_jnl_update()' function writes a corresponding deletion inode and
Artem Bityutskiyde94eb52008-07-22 13:06:20 +0300987 * direntry to the media, and adds the inode to orphans. After this, when the
988 * last reference to this inode has been dropped, this function is called. In
989 * general, it has to write one more deletion inode to the media, because if
990 * a commit happened between 'ubifs_jnl_update()' and
991 * 'ubifs_jnl_delete_inode()', the deletion inode is not in the journal
Adrian Hunter7d62ff22008-07-23 15:48:39 +0300992 * anymore, and in fact it might not be on the flash anymore, because it might
993 * have been garbage-collected already. And for optimization reasons UBIFS does
Artem Bityutskiyde94eb52008-07-22 13:06:20 +0300994 * not read the orphan area if it has been unmounted cleanly, so it would have
995 * no indication in the journal that there is a deleted inode which has to be
996 * removed from TNC.
997 *
998 * However, if there was no commit between 'ubifs_jnl_update()' and
999 * 'ubifs_jnl_delete_inode()', then there is no need to write the deletion
Adrian Hunter7d62ff22008-07-23 15:48:39 +03001000 * inode to the media for the second time. And this is quite a typical case.
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001001 *
1002 * This function returns zero in case of success and a negative error code in
1003 * case of failure.
1004 */
1005int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
1006{
1007 int err;
1008 struct ubifs_inode *ui = ubifs_inode(inode);
1009
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001010 ubifs_assert(c, inode->i_nlink == 0);
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001011
Richard Weinberger7959cf32019-04-05 00:34:36 +02001012 if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no)
1013 /* A commit happened for sure or inode hosts xattrs */
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001014 return ubifs_jnl_write_inode(c, inode);
1015
1016 down_read(&c->commit_sem);
1017 /*
1018 * Check commit number again, because the first test has been done
1019 * without @c->commit_sem, so a commit might have happened.
1020 */
1021 if (ui->del_cmtno != c->cmt_no) {
1022 up_read(&c->commit_sem);
1023 return ubifs_jnl_write_inode(c, inode);
1024 }
1025
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001026 err = ubifs_tnc_remove_ino(c, inode->i_ino);
1027 if (err)
1028 ubifs_ro_mode(c, err);
Adrian Hunterf7691082008-07-23 16:55:55 +03001029 else
1030 ubifs_delete_orphan(c, inode->i_ino);
Artem Bityutskiyde94eb52008-07-22 13:06:20 +03001031 up_read(&c->commit_sem);
1032 return err;
1033}
1034
/**
 * ubifs_jnl_xrename - cross rename two directory entries.
 * @c: UBIFS file-system description object
 * @fst_dir: parent inode of 1st directory entry to exchange
 * @fst_inode: 1st inode to exchange
 * @fst_nm: name of 1st inode to exchange
 * @snd_dir: parent inode of 2nd directory entry to exchange
 * @snd_inode: 2nd inode to exchange
 * @snd_nm: name of 2nd inode to exchange
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the cross rename operation which may involve
 * writing 2 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
		      const struct inode *fst_inode,
		      const struct fscrypt_name *fst_nm,
		      const struct inode *snd_dir,
		      const struct inode *snd_inode,
		      const struct fscrypt_name *snd_nm, int sync)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent1, *dent2;
	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
	int aligned_dlen1, aligned_dlen2;
	/* With distinct parents, both parent inodes must be written */
	int twoparents = (fst_dir != snd_dir);
	void *p;
	u8 hash_dent1[UBIFS_HASH_ARR_SZ];
	u8 hash_dent2[UBIFS_HASH_ARR_SZ];
	u8 hash_p1[UBIFS_HASH_ARR_SZ];
	u8 hash_p2[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
	ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));

	/* Dent node sizes include the name plus its NUL terminator */
	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);

	/*
	 * All nodes are written as one contiguous group: dent1, dent2, one
	 * (or two) parent inode nodes, plus room for an authentication node.
	 */
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
	if (twoparents)
		len += plen;

	len += ubifs_auth_node_sz(c);

	/* kzalloc so inter-node padding bytes are zeroed */
	dent1 = kzalloc(len, GFP_NOFS);
	if (!dent1)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent for 1st entry: @fst_inode under @snd_nm in @snd_dir */
	dent1->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
	dent1->inum = cpu_to_le64(fst_inode->i_ino);
	dent1->type = get_dent_type(fst_inode->i_mode);
	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
	dent1->name[fname_len(snd_nm)] = '\0';
	set_dent_cookie(c, dent1);
	zero_dent_node_unused(dent1);
	ubifs_prep_grp_node(c, dent1, dlen1, 0);
	err = ubifs_node_calc_hash(c, dent1, hash_dent1);
	if (err)
		goto out_release;

	/* Make new dent for 2nd entry: @snd_inode under @fst_nm in @fst_dir */
	dent2 = (void *)dent1 + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
	dent2->inum = cpu_to_le64(snd_inode->i_ino);
	dent2->type = get_dent_type(snd_inode->i_mode);
	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
	dent2->name[fname_len(fst_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);
	err = ubifs_node_calc_hash(c, dent2, hash_dent2);
	if (err)
		goto out_release;

	/*
	 * Pack the parent inode node(s) after the dents. pack_inode()'s last
	 * argument presumably marks the final node of the group - confirm
	 * against pack_inode()'s definition.
	 */
	p = (void *)dent2 + aligned_dlen2;
	if (!twoparents) {
		pack_inode(c, p, fst_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_p1);
		if (err)
			goto out_release;
	} else {
		pack_inode(c, p, fst_dir, 0);
		err = ubifs_node_calc_hash(c, p, hash_p1);
		if (err)
			goto out_release;
		p += ALIGN(plen, 8);
		pack_inode(c, p, snd_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_p2);
		if (err)
			goto out_release;
	}

	err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
	}
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);

	/*
	 * Update the TNC for each written node; @offs advances by the same
	 * aligned sizes used when laying out the group above.
	 */
	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen2;

	ino_key_init(c, &key, fst_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
	if (err)
		goto out_ro;

	if (twoparents) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, snd_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);

	/* The parent inodes are on the media now - mark them clean */
	mark_inode_clean(c, ubifs_inode(fst_dir));
	if (twoparents)
		mark_inode_clean(c, ubifs_inode(snd_dir));
	kfree(dent1);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	/* TNC update failed after the write - the FS must go read-only */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(dent1);
	return err;
}
1199
/**
 * ubifs_jnl_rename - rename a directory entry.
 * @c: UBIFS file-system description object
 * @old_dir: parent inode of directory entry to rename
 * @old_inode: directory entry's inode to rename
 * @old_nm: name of the old directory entry to rename
 * @new_dir: parent inode of directory entry to rename
 * @new_inode: new directory entry's inode (or directory entry's inode to
 *	       replace)
 * @new_nm: new name of the new directory entry
 * @whiteout: whiteout inode
 * @sync: non-zero if the write-buffer has to be synchronized
 *
 * This function implements the re-name operation which may involve writing up
 * to 4 inodes and 2 directory entries. It marks the written inodes as clean
 * and returns zero on success. In case of failure, a negative error code is
 * returned.
 */
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
		     const struct inode *old_inode,
		     const struct fscrypt_name *old_nm,
		     const struct inode *new_dir,
		     const struct inode *new_inode,
		     const struct fscrypt_name *new_nm,
		     const struct inode *whiteout, int sync)
{
	void *p;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *dent2;
	int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
	/* Overwriting rename may drop the last link of @new_inode */
	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
	/* Cross-directory rename needs both parent inodes written */
	int move = (old_dir != new_dir);
	struct ubifs_inode *new_ui;
	u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
	u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
	u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
	u8 hash_dent1[UBIFS_HASH_ARR_SZ];
	u8 hash_dent2[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
	ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
	ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));

	/* Dent node sizes include the name plus its NUL terminator */
	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
	if (new_inode) {
		new_ui = ubifs_inode(new_inode);
		ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
		ilen = UBIFS_INO_NODE_SZ;
		/* For a deletion inode no attached data is written */
		if (!last_reference)
			ilen += new_ui->data_len;
	} else
		ilen = 0;

	/*
	 * All nodes are written as one contiguous group: the two dents, the
	 * optional @new_inode node, the parent inode(s), plus room for an
	 * authentication node.
	 */
	aligned_dlen1 = ALIGN(dlen1, 8);
	aligned_dlen2 = ALIGN(dlen2, 8);
	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
	if (move)
		len += plen;

	len += ubifs_auth_node_sz(c);

	/* kzalloc so inter-node padding bytes are zeroed */
	dent = kzalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	/* Make new dent: @old_inode under @new_nm in @new_dir */
	dent->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
	dent->inum = cpu_to_le64(old_inode->i_ino);
	dent->type = get_dent_type(old_inode->i_mode);
	dent->nlen = cpu_to_le16(fname_len(new_nm));
	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
	dent->name[fname_len(new_nm)] = '\0';
	set_dent_cookie(c, dent);
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen1, 0);
	err = ubifs_node_calc_hash(c, dent, hash_dent1);
	if (err)
		goto out_release;

	/* Second dent: either the whiteout or a deletion dent for @old_nm */
	dent2 = (void *)dent + aligned_dlen1;
	dent2->ch.node_type = UBIFS_DENT_NODE;
	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);

	if (whiteout) {
		dent2->inum = cpu_to_le64(whiteout->i_ino);
		dent2->type = get_dent_type(whiteout->i_mode);
	} else {
		/* Make deletion dent */
		dent2->inum = 0;
		dent2->type = DT_UNKNOWN;
	}
	dent2->nlen = cpu_to_le16(fname_len(old_nm));
	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
	dent2->name[fname_len(old_nm)] = '\0';
	set_dent_cookie(c, dent2);
	zero_dent_node_unused(dent2);
	ubifs_prep_grp_node(c, dent2, dlen2, 0);
	err = ubifs_node_calc_hash(c, dent2, hash_dent2);
	if (err)
		goto out_release;

	p = (void *)dent2 + aligned_dlen2;
	if (new_inode) {
		pack_inode(c, p, new_inode, 0);
		err = ubifs_node_calc_hash(c, p, hash_new_inode);
		if (err)
			goto out_release;

		p += ALIGN(ilen, 8);
	}

	/*
	 * Pack the parent inode node(s). pack_inode()'s last argument
	 * presumably marks the final node of the group - confirm against
	 * pack_inode()'s definition.
	 */
	if (!move) {
		pack_inode(c, p, old_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_old_dir);
		if (err)
			goto out_release;
	} else {
		pack_inode(c, p, old_dir, 0);
		err = ubifs_node_calc_hash(c, p, hash_old_dir);
		if (err)
			goto out_release;

		p += ALIGN(plen, 8);
		pack_inode(c, p, new_dir, 1);
		err = ubifs_node_calc_hash(c, p, hash_new_dir);
		if (err)
			goto out_release;
	}

	/* Overwritten inode lost its last link - record it as an orphan */
	if (last_reference) {
		err = ubifs_add_orphan(c, new_inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		new_ui->del_cmtno = c->cmt_no;
		orphan_added = 1;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
		if (new_inode)
			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
						  new_inode->i_ino);
	}
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);

	/*
	 * Update the TNC for each written node; @offs advances by the same
	 * aligned sizes used when laying out the group above.
	 */
	dent_key_init(c, &key, new_dir->i_ino, new_nm);
	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm);
	if (err)
		goto out_ro;

	offs += aligned_dlen1;
	if (whiteout) {
		/* The old name now resolves to the whiteout inode */
		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
		if (err)
			goto out_ro;

		ubifs_delete_orphan(c, whiteout->i_ino);
	} else {
		/* The deletion dent is dirt as soon as the TNC drops the name */
		err = ubifs_add_dirt(c, lnum, dlen2);
		if (err)
			goto out_ro;

		dent_key_init(c, &key, old_dir->i_ino, old_nm);
		err = ubifs_tnc_remove_nm(c, &key, old_nm);
		if (err)
			goto out_ro;
	}

	offs += aligned_dlen2;
	if (new_inode) {
		ino_key_init(c, &key, new_inode->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode);
		if (err)
			goto out_ro;
		offs += ALIGN(ilen, 8);
	}

	ino_key_init(c, &key, old_dir->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
	if (err)
		goto out_ro;

	if (move) {
		offs += ALIGN(plen, 8);
		ino_key_init(c, &key, new_dir->i_ino);
		err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir);
		if (err)
			goto out_ro;
	}

	finish_reservation(c);
	if (new_inode) {
		mark_inode_clean(c, new_ui);
		spin_lock(&new_ui->ui_lock);
		new_ui->synced_i_size = new_ui->ui_size;
		spin_unlock(&new_ui->ui_lock);
	}
	/* The parent inodes are on the media now - mark them clean */
	mark_inode_clean(c, ubifs_inode(old_dir));
	if (move)
		mark_inode_clean(c, ubifs_inode(new_dir));
	kfree(dent);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	/* Undo the orphan only if it was added by this invocation */
	ubifs_ro_mode(c, err);
	if (orphan_added)
		ubifs_delete_orphan(c, new_inode->i_ino);
out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;
}
1431
/**
 * truncate_data_node - re-compress/encrypt a truncated data node.
 * @c: UBIFS file-system description object
 * @inode: inode which refers to the data node
 * @block: data block number
 * @dn: data node to re-compress
 * @new_len: new length
 *
 * This function is used when an inode is truncated and the last data node of
 * the inode has to be re-compressed/encrypted and re-written.
 */
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
			      unsigned int block, struct ubifs_data_node *dn,
			      int *new_len)
{
	void *buf;
	int err, dlen, compr_type, out_len, old_dlen;

	/*
	 * Scratch buffer for the decompressed data; sized for the worst-case
	 * expansion of @dn->size bytes. Unused if the node is uncompressed.
	 */
	out_len = le32_to_cpu(dn->size);
	buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	/* On-media payload length, excluding the data-node header */
	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	compr_type = le16_to_cpu(dn->compr_type);

	/* Decrypt in place before the payload can be decompressed */
	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto out;
	}

	if (compr_type == UBIFS_COMPR_NONE) {
		/* Uncompressed: truncation is just shortening the payload */
		out_len = *new_len;
	} else {
		/* Decompress fully, then re-compress only the first *new_len bytes */
		err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
		if (err)
			goto out;

		ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
	}

	if (IS_ENCRYPTED(inode)) {
		/*
		 * Re-encrypt in place. NOTE(review): @old_dlen is passed as
		 * the in/out length here and becomes the new payload length -
		 * presumably ubifs_encrypt() pads to the cipher block size;
		 * confirm against ubifs_encrypt()'s definition.
		 */
		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
		if (err)
			goto out;

		out_len = old_dlen;
	} else {
		dn->compr_size = 0;
	}

	ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
	dn->compr_type = cpu_to_le16(compr_type);
	dn->size = cpu_to_le32(*new_len);
	/* Return the total node length (header + payload) via *new_len */
	*new_len = UBIFS_DATA_NODE_SZ + out_len;
	err = 0;
out:
	kfree(buf);
	return err;
}
1493
1494/**
1495 * ubifs_jnl_truncate - update the journal for a truncation.
1496 * @c: UBIFS file-system description object
1497 * @inode: inode to truncate
1498 * @old_size: old size
1499 * @new_size: new size
1500 *
1501 * When the size of a file decreases due to truncation, a truncation node is
1502 * written, the journal tree is updated, and the last data block is re-written
1503 * if it has been affected. The inode is also updated in order to synchronize
1504 * the new inode size.
1505 *
1506 * This function marks the inode as clean and returns zero on success. In case
1507 * of failure, a negative error code is returned.
1508 */
1509int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1510 loff_t old_size, loff_t new_size)
1511{
1512 union ubifs_key key, to_key;
1513 struct ubifs_ino_node *ino;
1514 struct ubifs_trun_node *trun;
Kees Cook3f649ab2020-06-03 13:09:38 -07001515 struct ubifs_data_node *dn;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001516 int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1517 struct ubifs_inode *ui = ubifs_inode(inode);
1518 ino_t inum = inode->i_ino;
1519 unsigned int blk;
Sascha Hauer823838a2018-09-07 14:36:34 +02001520 u8 hash_ino[UBIFS_HASH_ARR_SZ];
1521 u8 hash_dn[UBIFS_HASH_ARR_SZ];
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001522
Artem Bityutskiye84461a2008-10-29 12:08:43 +02001523 dbg_jnl("ino %lu, size %lld -> %lld",
1524 (unsigned long)inum, old_size, new_size);
Richard Weinberger6eb61d52018-07-12 13:01:57 +02001525 ubifs_assert(c, !ui->data_len);
1526 ubifs_assert(c, S_ISREG(inode->i_mode));
1527 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001528
1529 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
1530 UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001531
1532 sz += ubifs_auth_node_sz(c);
1533
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001534 ino = kmalloc(sz, GFP_NOFS);
1535 if (!ino)
1536 return -ENOMEM;
1537
1538 trun = (void *)ino + UBIFS_INO_NODE_SZ;
1539 trun->ch.node_type = UBIFS_TRUN_NODE;
1540 trun->inum = cpu_to_le32(inum);
1541 trun->old_size = cpu_to_le64(old_size);
1542 trun->new_size = cpu_to_le64(new_size);
1543 zero_trun_node_unused(trun);
1544
1545 dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1546 if (dlen) {
1547 /* Get last data block so it can be truncated */
1548 dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1549 blk = new_size >> UBIFS_BLOCK_SHIFT;
1550 data_key_init(c, &key, inum, blk);
Artem Bityutskiy515315a2012-01-13 12:33:53 +02001551 dbg_jnlk(&key, "last block key ");
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001552 err = ubifs_tnc_lookup(c, &key, dn);
1553 if (err == -ENOENT)
1554 dlen = 0; /* Not found (so it is a hole) */
1555 else if (err)
1556 goto out_free;
1557 else {
Richard Weinberger95a22d22018-07-01 23:20:51 +02001558 int dn_len = le32_to_cpu(dn->size);
1559
1560 if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
1561 ubifs_err(c, "bad data node (block %u, inode %lu)",
1562 blk, inode->i_ino);
Zhihao Chenga33e30a2020-06-16 15:11:44 +08001563 ubifs_dump_node(c, dn, sz - UBIFS_INO_NODE_SZ -
1564 UBIFS_TRUN_NODE_SZ);
Richard Weinberger95a22d22018-07-01 23:20:51 +02001565 goto out_free;
1566 }
1567
1568 if (dn_len <= dlen)
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001569 dlen = 0; /* Nothing to do */
1570 else {
Richard Weinberger77999532016-09-29 22:20:19 +02001571 err = truncate_data_node(c, inode, blk, dn, &dlen);
1572 if (err)
1573 goto out_free;
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001574 }
1575 }
1576 }
1577
1578 /* Must make reservation before allocating sequence numbers */
1579 len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001580
1581 if (ubifs_authenticated(c))
1582 len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c);
1583 else
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001584 len += dlen;
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001585
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001586 err = make_reservation(c, BASEHD, len);
1587 if (err)
1588 goto out_free;
1589
Artem Bityutskiyfd6c6b52008-07-22 12:19:09 +03001590 pack_inode(c, ino, inode, 0);
Sascha Hauer823838a2018-09-07 14:36:34 +02001591 err = ubifs_node_calc_hash(c, ino, hash_ino);
1592 if (err)
1593 goto out_release;
1594
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001595 ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001596 if (dlen) {
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001597 ubifs_prep_grp_node(c, dn, dlen, 1);
Sascha Hauer823838a2018-09-07 14:36:34 +02001598 err = ubifs_node_calc_hash(c, dn, hash_dn);
1599 if (err)
1600 goto out_release;
1601 }
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001602
1603 err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1604 if (err)
1605 goto out_release;
1606 if (!sync)
1607 ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1608 release_head(c, BASEHD);
1609
Sascha Hauer6a98bc42018-09-07 14:36:36 +02001610 ubifs_add_auth_dirt(c, lnum);
1611
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001612 if (dlen) {
1613 sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
Sascha Hauer823838a2018-09-07 14:36:34 +02001614 err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001615 if (err)
1616 goto out_ro;
1617 }
1618
1619 ino_key_init(c, &key, inum);
Sascha Hauer823838a2018-09-07 14:36:34 +02001620 err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001621 if (err)
1622 goto out_ro;
1623
1624 err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1625 if (err)
1626 goto out_ro;
1627
1628 bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1629 blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1630 data_key_init(c, &key, inum, blk);
1631
1632 bit = old_size & (UBIFS_BLOCK_SIZE - 1);
Artem Bityutskiyf92b9822008-12-28 11:34:26 +02001633 blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001634 data_key_init(c, &to_key, inum, blk);
1635
1636 err = ubifs_tnc_remove_range(c, &key, &to_key);
1637 if (err)
1638 goto out_ro;
1639
1640 finish_reservation(c);
1641 spin_lock(&ui->ui_lock);
1642 ui->synced_i_size = ui->ui_size;
1643 spin_unlock(&ui->ui_lock);
1644 mark_inode_clean(c, ui);
1645 kfree(ino);
1646 return 0;
1647
1648out_release:
1649 release_head(c, BASEHD);
1650out_ro:
1651 ubifs_ro_mode(c, err);
1652 finish_reservation(c);
1653out_free:
1654 kfree(ino);
1655 return err;
1656}
1657
Artem Bityutskiy1e517642008-07-14 19:08:37 +03001658
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode
 * @nm: extended attribute entry name
 *
 * This function delete an extended attribute which is very similar to
 * un-linking regular files - it writes a deletion xentry, a deletion inode and
 * updates the target inode. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode,
			   const struct fscrypt_name *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);
	u8 hash[UBIFS_HASH_ARR_SZ];

	ubifs_assert(c, inode->i_nlink == 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any data
	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	 *
	 * Buffer layout: [xentry][xattr inode][host inode][auth node].
	 */
	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	write_len = len + ubifs_auth_node_sz(c);

	xent = kzalloc(write_len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, write_len);
	if (err) {
		kfree(xent);
		return err;
	}

	/* inum == 0 marks this xentry as a deletion entry */
	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(fname_len(nm));
	memcpy(xent->name, fname_name(nm), fname_len(nm));
	xent->name[fname_len(nm)] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);
	/* Only the host inode position is re-added to the TNC below */
	err = ubifs_node_calc_hash(c, ino, hash);
	if (err)
		goto out_release;

	err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);

	ubifs_add_auth_dirt(c, lnum);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_release:
	kfree(xent);
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
1779
/**
 * ubifs_jnl_change_xattr - change an extended attribute.
 * @c: UBIFS file-system description object
 * @inode: extended attribute inode
 * @host: host inode
 *
 * This function writes the updated version of an extended attribute inode and
 * the host inode to the journal (to the base head). The host inode is written
 * after the extended attribute inode in order to guarantee that the extended
 * attribute will be flushed when the inode is synchronized by 'fsync()' and
 * consequently, the write-buffer is synchronized. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
			   const struct inode *host)
{
	int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
	struct ubifs_inode *host_ui = ubifs_inode(host);
	struct ubifs_ino_node *ino;
	union ubifs_key key;
	int sync = IS_DIRSYNC(host);
	u8 hash_host[UBIFS_HASH_ARR_SZ];
	u8 hash[UBIFS_HASH_ARR_SZ];

	dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
	ubifs_assert(c, inode->i_nlink > 0);
	ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));

	/* Buffer layout: [host inode][xattr inode][auth node] */
	len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
	len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
	aligned_len1 = ALIGN(len1, 8);
	aligned_len = aligned_len1 + ALIGN(len2, 8);

	aligned_len += ubifs_auth_node_sz(c);

	ino = kzalloc(aligned_len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, aligned_len);
	if (err)
		goto out_free;

	pack_inode(c, ino, host, 0);
	err = ubifs_node_calc_hash(c, ino, hash_host);
	if (err)
		goto out_release;
	pack_inode(c, (void *)ino + aligned_len1, inode, 1);
	err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
	if (err)
		goto out_release;

	/*
	 * NOTE(review): @sync is computed above but write_head() is called
	 * with sync == 0, so even IS_DIRSYNC hosts only get the inodes added
	 * to the write-buffer below rather than a synchronous write — confirm
	 * this is the intended behavior (it relies on a later fsync/wbuf
	 * flush).
	 */
	err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
	if (!sync && !err) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
	}
	release_head(c, BASEHD);
	if (err)
		goto out_ro;

	ubifs_add_auth_dirt(c, lnum);

	ino_key_init(c, &key, host->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
	if (err)
		goto out_ro;

	ino_key_init(c, &key, inode->i_ino);
	err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	kfree(ino);
	return 0;

out_release:
	release_head(c, BASEHD);
out_ro:
	ubifs_ro_mode(c, err);
	finish_reservation(c);
out_free:
	kfree(ino);
	return err;
}
1873