// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 *          Zoltan Sogor
 */

/*
 * This file implements the UBIFS I/O subsystem which provides various
 * I/O-related helper functions (reading/writing/checking/validating nodes)
 * and implements write-buffering support. Write buffers help to save space
 * which otherwise would have been wasted for padding to the nearest minimal
 * I/O unit boundary. Instead, data first goes to the write-buffer and is
 * flushed when the buffer is full or when it is not used for some time (by
 * timer). This is similar to the mechanism used by JFFS2.
 *
 * UBIFS distinguishes between minimum write size (@c->min_io_size) and maximum
 * write size (@c->max_write_size). The latter is the maximum amount of bytes
 * the underlying flash is able to program at a time, and writing in
 * @c->max_write_size units should presumably be faster. Obviously,
 * @c->min_io_size <= @c->max_write_size. Write-buffers are of
 * @c->max_write_size bytes in size for maximum performance. However, when a
 * write-buffer is flushed, only the portion of it (aligned to @c->min_io_size
 * boundary) which contains data is written, not the whole write-buffer,
 * because this is more space-efficient.
 *
 * This optimization adds a few complications to the code. Indeed, on the one
 * hand, we want to write in optimal @c->max_write_size byte chunks, which
 * also means aligning writes at @c->max_write_size byte offsets. On the
 * other hand, we do not want to waste space when synchronizing the write
 * buffer, so during synchronization we write in smaller chunks. This makes
 * the next write offset unaligned to @c->max_write_size bytes, so we have to
 * make sure that the write-buffer offset (@wbuf->offs) eventually becomes
 * aligned to @c->max_write_size bytes again. We do this by temporarily
 * shrinking the write-buffer size (@wbuf->size).
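 *
 * As an illustration (with made-up sizes), suppose @c->min_io_size is 512
 * and @c->max_write_size is 2048. If a write-buffer starting at offset 0
 * holds 700 bytes of data when it is synchronized, only ALIGN(700, 512) =
 * 1024 bytes are written and @wbuf->offs becomes 1024. Since 1024 is not
 * aligned to 2048, @wbuf->size is temporarily shrunk to 1024 bytes, so the
 * next flush of a full write-buffer ends exactly at offset 2048, after which
 * the write-buffer grows back to @c->max_write_size bytes.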
 *
 * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
 * mutexes defined inside these objects. Since sometimes upper-level code
 * has to lock the write-buffer (e.g. journal space reservation code), many
 * functions related to write-buffers have a "nolock" suffix, which means that
 * the caller has to lock the write-buffer before calling them.
 *
 * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not
 * aligned, UBIFS starts the next node from the aligned address, and the padded
 * bytes may contain any rubbish. In other words, UBIFS does not put padding
 * bytes in those small gaps. Common headers of nodes store real node lengths,
 * not aligned lengths. Indexing nodes also store real lengths in branches.
 *
 * UBIFS uses padding when it pads to the next min. I/O unit. In this case it
 * uses padding nodes or padding bytes, if the padding node does not fit.
 *
 * All UBIFS nodes are protected by CRC checksums and UBIFS checks CRC when
 * they are read from the flash media.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= SB_RDONLY;
		ubifs_warn(c, "switched to read-only mode, error %d", err);
		dump_stack();
	}
}

/*
 * Below are simple wrappers over UBI I/O functions which include some
 * additional checks and UBIFS debugging stuff. See corresponding UBI function
 * for more information.
 */

int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
		   int len, int even_ebadmsg)
{
	int err;

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	/*
	 * In case of %-EBADMSG print the error message only if the
	 * @even_ebadmsg is true.
	 */
	if (err && (err != -EBADMSG || even_ebadmsg)) {
		ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
		    int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len);
	if (err) {
		ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_change(c->ubi, lnum, buf, len);
	else
		err = dbg_leb_change(c, lnum, buf, len);
	if (err) {
		ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
			  len, lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_unmap(c->ubi, lnum);
	else
		err = dbg_leb_unmap(c, lnum);
	if (err) {
		ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
	int err;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_map(c->ubi, lnum);
	else
		err = dbg_leb_map(c, lnum);
	if (err) {
		ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
		ubifs_ro_mode(c, err);
		dump_stack();
	}
	return err;
}

int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
{
	int err;

	err = ubi_is_mapped(c->ubi, lnum);
	if (err < 0) {
		ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
			  lnum, err);
		dump_stack();
	}
	return err;
}

/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks node magic number and CRC checksum. This function also
 * validates node length to prevent UBIFS from becoming crazy when an attacker
 * feeds it a file-system image with incorrect nodes. For example, too large
 * node length in the common header could cause UBIFS to read memory outside of
 * allocated buffer when checking the CRC checksum.
 *
 * This function may skip the CRC checking of data nodes if @c->no_chk_data_crc
 * is true, which is controlled by the corresponding UBIFS mount option.
 * However, if @must_chk_crc is true, then @c->no_chk_data_crc is ignored and
 * the CRC is checked. Similarly, if @c->mounting or @c->remounting_rw is true
 * (we are mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored
 * and the CRC is checked. This is because during mounting or re-mounting from
 * R/O mode to R/W mode we may read journal nodes (when replaying the journal
 * or doing recovery) and the journal nodes may potentially be corrupted, so
 * checking is required.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err(c, "bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err(c, "bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
	    !c->remounting_rw && c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err(c, "bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
		ubifs_dump_node(c, buf);
		dump_stack();
	}
	return err;
}

/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of @c->min_io_size, and
 * when we have to write less data we add a padding node to the write-buffer
 * and pad it to the next minimal I/O unit boundary. Padding nodes help when
 * the media is being scanned. If the amount of wasted space is not enough to
 * fit a padding node, which takes %UBIFS_PAD_NODE_SZ bytes, we write the
 * padding byte pattern (%UBIFS_PADDING_BYTE) instead.
 *
 * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
 * used.
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(c, pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}

/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err(c, "sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn(c, "running out of sequence numbers, end of life soon");
	}

	return sqnum;
}

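/**
 * ubifs_init_node - initialize the common header of a node.
 * @c: UBIFS file-system description object
 * @node: the node to initialize
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function fills the common header of the node at @node: the magic
 * number, the node length, and a freshly allocated sequence number. If @pad
 * is not zero, the node is also padded up to the next minimal I/O unit
 * boundary. Note, the CRC is not calculated here - see 'ubifs_crc_node()'.
 */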
void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
{
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}

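/**
 * ubifs_crc_node - calculate and store the CRC of a node.
 * @c: UBIFS file-system description object
 * @node: the node to checksum
 * @len: node length
 *
 * This function calculates the CRC32 checksum over the node at @node,
 * skipping the first 8 bytes of the common header (which hold the magic
 * number and the CRC itself), and stores the result in the common header.
 */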
void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
{
	struct ubifs_ch *ch = node;
	uint32_t crc;

	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * ubifs_prepare_node_hmac - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @hmac_offs: offset of the HMAC in the node
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero. If @hmac_offs is positive,
 * an HMAC is inserted into the node at the given offset.
 *
 * This function returns 0 for success or a negative error code otherwise.
 */
int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
			    int hmac_offs, int pad)
{
	int err;

	ubifs_init_node(c, node, len, pad);

	if (hmac_offs > 0) {
		err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
		if (err)
			return err;
	}

	ubifs_crc_node(c, node, len);

	return 0;
}

/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero.
 */
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	/*
	 * Deliberately ignore return value since this function can only fail
	 * when a hmac offset is given.
	 */
	ubifs_prepare_node_hmac(c, node, len, 0, pad);
}

/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC and fills the common header.
 */
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(c, len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}

/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer descriptor
 */
static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
	unsigned long long delta = dirty_writeback_interval;

	/* centi to milli, milli to nano, then 10% */
	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;

	ubifs_assert(c, !hrtimer_active(&wbuf->timer));
	ubifs_assert(c, delta <= ULONG_MAX);

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
			       HRTIMER_MODE_REL);
}

/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}

/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 *
 * Note, although write-buffers are of @c->max_write_size, this function does
 * not necessarily write all @c->max_write_size bytes to the flash. Instead,
 * if the write-buffer is only partially filled with data, only the used part
 * of the write-buffer (aligned on a @c->min_io_size boundary) is synchronized.
 * This way we waste less space.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt, sync_len;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, !(wbuf->avail & 7));
	ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->ro_error)
		return -EROFS;

	/*
	 * Do not write the whole write-buffer, but only the minimum necessary
	 * number of min. I/O units.
	 */
	sync_len = ALIGN(wbuf->used, c->min_io_size);
	dirt = sync_len - wbuf->used;
	if (dirt)
		ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
	err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
	if (err)
		return err;

	spin_lock(&wbuf->lock);
	wbuf->offs += sync_len;
	/*
	 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
	 * But our goal is to optimize writes and make sure we write in
	 * @c->max_write_size chunks and to @c->max_write_size-aligned offset.
	 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
	 * sure that @wbuf->offs + @wbuf->size is aligned to
	 * @c->max_write_size. This way we make sure that after next
	 * write-buffer flush we are again at the optimal offset (aligned to
	 * @c->max_write_size).
	 */
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}

/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer has to be empty. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
	ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(c, lnum != wbuf->lnum);
	ubifs_assert(c, wbuf->used == 0);

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	if (c->leb_size - wbuf->offs < c->max_write_size)
		wbuf->size = c->leb_size - wbuf->offs;
	else if (wbuf->offs & (c->max_write_size - 1))
		wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
	else
		wbuf->size = c->max_write_size;
	wbuf->avail = wbuf->size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);

	return 0;
}

/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by background thread to synchronize write-buffers.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err(c, "cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}

/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take up a whole max. write unit (@c->max_write_size). Instead, the
 * node will sit in RAM until the write-buffer is synchronized (e.g., by timer,
 * or because more data are appended to the write-buffer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8);

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
	ubifs_assert(c, wbuf->size >= c->min_io_size);
	ubifs_assert(c, wbuf->size <= c->max_write_size);
	ubifs_assert(c, wbuf->size % c->min_io_size == 0);
	ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);
	if (c->leb_size - wbuf->offs >= c->max_write_size)
		ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within the
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
					      wbuf->offs, wbuf->size);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += wbuf->size;
			if (c->leb_size - wbuf->offs >= c->max_write_size)
				wbuf->size = c->max_write_size;
			else
				wbuf->size = c->leb_size - wbuf->offs;
			wbuf->avail = wbuf->size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	written = 0;

	if (wbuf->used) {
		/*
		 * The node is too large to fit entirely within the currently
		 * available space. We have to fill and flush the write-buffer
		 * and switch to the next max. write unit.
		 */
		dbg_io("flush jhead %s wbuf to LEB %d:%d",
		       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
		memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
		err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->avail;
		aligned_len -= wbuf->avail;
		written += wbuf->avail;
	} else if (wbuf->offs & (c->max_write_size - 1)) {
		/*
		 * The write-buffer offset is not aligned to
		 * @c->max_write_size and @wbuf->size is less than
		 * @c->max_write_size. Write @wbuf->size bytes to make sure the
		 * following writes are done in optimal @c->max_write_size
		 * chunks.
		 */
		dbg_io("write %d bytes to LEB %d:%d",
		       wbuf->size, wbuf->lnum, wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
				      wbuf->size);
		if (err)
			goto out;

		wbuf->offs += wbuf->size;
		len -= wbuf->size;
		aligned_len -= wbuf->size;
		written += wbuf->size;
	}

	/*
	 * The remaining data may take up several whole max. write units, so
	 * write as many whole max. write units as possible directly to the
	 * flash media. We align the node length to an 8-byte boundary because
	 * we anyway flush the wbuf if the remaining space is less than 8 bytes.
	 */
	n = aligned_len >> c->max_write_shift;
	if (n) {
		n <<= c->max_write_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
		       wbuf->offs);
		err = ubifs_leb_write(c, wbuf->lnum, buf + written,
				      wbuf->offs, n);
		if (err)
			goto out;
		wbuf->offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * Now we have what is left, which does not take up a whole
		 * max. write unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	if (c->leb_size - wbuf->offs >= c->max_write_size)
		wbuf->size = c->max_write_size;
	else
		wbuf->size = c->leb_size - wbuf->offs;
	wbuf->avail = wbuf->size - aligned_len;
	wbuf->used = aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(c, wbuf);

	return 0;

out:
	ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	ubifs_dump_node(c, buf);
	dump_stack();
	ubifs_dump_leb(c, wbuf->lnum);
	return err;
}

/**
 * ubifs_write_node_hmac - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @hmac_offs: offset of the HMAC within the node
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
			  int offs, int hmac_offs)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);
	ubifs_assert(c, !c->space_fixup);

	if (c->ro_error)
		return -EROFS;

	err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
	if (err)
		return err;

	err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
	if (err)
		ubifs_dump_node(c, buf);

	return err;
}

/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs)
{
	return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
}

/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes data from the buffer, otherwise it reads the flash media.
 * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
 * error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
	ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before write-buffer */
		err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
		if (err && err != -EBADMSG)
			return err;
	}

	if (type != ch->node_type) {
		ubifs_err(c, "bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err(c, "expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err(c, "bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
	ubifs_dump_node(c, buf);
	dump_stack();
	return -EINVAL;
}

/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
| 1037 | * This function reads a node of known type and length, checks it and |
| 1038 | * stores it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched |
| 1039 | * and a negative error code in case of failure. |
| 1040 | */ |
| 1041 | int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, |
| 1042 | int lnum, int offs) |
| 1043 | { |
| 1044 | int err, l; |
| 1045 | struct ubifs_ch *ch = buf; |
| 1046 | |
| 1047 | dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); |
Richard Weinberger | 6eb61d5 | 2018-07-12 13:01:57 +0200 | [diff] [blame] | 1048 | ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); |
| 1049 | ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size); |
| 1050 | ubifs_assert(c, !(offs & 7) && offs < c->leb_size); |
| 1051 | ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1052 | |
Artem Bityutskiy | d304820 | 2011-06-03 14:03:25 +0300 | [diff] [blame] | 1053 | err = ubifs_leb_read(c, lnum, buf, offs, len, 0); |
| 1054 | if (err && err != -EBADMSG) |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1055 | return err; |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1056 | |
| 1057 | if (type != ch->node_type) { |
Daniel Golle | 90bea5a3 | 2014-06-02 15:51:10 +0200 | [diff] [blame] | 1058 | ubifs_errc(c, "bad node type (%d but expected %d)", |
| 1059 | ch->node_type, type); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1060 | goto out; |
| 1061 | } |
| 1062 | |
Adrian Hunter | 2953e73 | 2008-09-04 16:26:00 +0300 | [diff] [blame] | 1063 | err = ubifs_check_node(c, buf, lnum, offs, 0, 0); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1064 | if (err) { |
Daniel Golle | 90bea5a3 | 2014-06-02 15:51:10 +0200 | [diff] [blame] | 1065 | ubifs_errc(c, "expected node type %d", type); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1066 | return err; |
| 1067 | } |
| 1068 | |
| 1069 | l = le32_to_cpu(ch->len); |
| 1070 | if (l != len) { |
Daniel Golle | 90bea5a3 | 2014-06-02 15:51:10 +0200 | [diff] [blame] | 1071 | ubifs_errc(c, "bad node length %d, expected %d", l, len); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1072 | goto out; |
| 1073 | } |
| 1074 | |
| 1075 | return 0; |
| 1076 | |
| 1077 | out: |
Daniel Golle | 90bea5a3 | 2014-06-02 15:51:10 +0200 | [diff] [blame] | 1078 | ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum, |
| 1079 | offs, ubi_is_mapped(c->ubi, lnum)); |
| 1080 | if (!c->probing) { |
| 1081 | ubifs_dump_node(c, buf); |
| 1082 | dump_stack(); |
| 1083 | } |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1084 | return -EINVAL; |
| 1085 | } |
| 1086 | |
| 1087 | /** |
| 1088 | * ubifs_wbuf_init - initialize write-buffer. |
| 1089 | * @c: UBIFS file-system description object |
| 1090 | * @wbuf: write-buffer to initialize |
| 1091 | * |
Artem Bityutskiy | cb54ef8 | 2009-06-23 20:30:32 +0300 | [diff] [blame] | 1092 | * This function initializes the write-buffer. Returns zero in case of success |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1093 | * and %-ENOMEM in case of failure. |
| 1094 | */ |
| 1095 | int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) |
| 1096 | { |
| 1097 | size_t size; |
| 1098 | |
Artem Bityutskiy | 6c7f74f | 2011-02-06 14:45:26 +0200 | [diff] [blame] | 1099 | wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1100 | if (!wbuf->buf) |
| 1101 | return -ENOMEM; |
| 1102 | |
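	/*
	 * Every node starts with a common header of UBIFS_CH_SZ bytes, so at
	 * most @c->max_write_size / UBIFS_CH_SZ nodes fit into the buffer;
	 * the extra slot is presumably just slack. One inode number is
	 * remembered per buffered node.
	 */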
Artem Bityutskiy | 6c7f74f | 2011-02-06 14:45:26 +0200 | [diff] [blame] | 1103 | size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1104 | wbuf->inodes = kmalloc(size, GFP_KERNEL); |
| 1105 | if (!wbuf->inodes) { |
| 1106 | kfree(wbuf->buf); |
| 1107 | wbuf->buf = NULL; |
| 1108 | return -ENOMEM; |
| 1109 | } |
| 1110 | |
| 1111 | wbuf->used = 0; |
| 1112 | wbuf->lnum = wbuf->offs = -1; |
Artem Bityutskiy | 6c7f74f | 2011-02-06 14:45:26 +0200 | [diff] [blame] | 1113 | /* |
| 1114 | * If the LEB starts at an address aligned to the max. write size, then the |
| 1115 | * write-buffer size has to be set to @c->max_write_size. Otherwise, |
| 1116 | * set it to something smaller so that it ends at the closest max. |
| 1117 | * write size boundary. |
| 1118 | */ |
| 1119 | size = c->max_write_size - (c->leb_start % c->max_write_size); |
| 1120 | wbuf->avail = wbuf->size = size; |
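	/*
	 * Purely illustrative numbers: with @c->max_write_size = 2048 and
	 * @c->leb_start = 512, size = 2048 - (512 % 2048) = 1536, so the
	 * first flush ends exactly at a max. write size boundary and all
	 * following buffers can again be full 2048-byte chunks.
	 */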
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1121 | wbuf->sync_callback = NULL; |
| 1122 | mutex_init(&wbuf->io_mutex); |
| 1123 | spin_lock_init(&wbuf->lock); |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1124 | wbuf->c = c; |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1125 | wbuf->next_ino = 0; |
| 1126 | |
Artem Bityutskiy | f2c5dbd | 2009-05-28 16:24:15 +0300 | [diff] [blame] | 1127 | hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1128 | wbuf->timer.function = wbuf_timer_callback_nolock; |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1129 | return 0; |
| 1130 | } |
| 1131 | |
| 1132 | /** |
| 1133 | * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. |
Artem Bityutskiy | cb54ef8 | 2009-06-23 20:30:32 +0300 | [diff] [blame] | 1134 | * @wbuf: the write-buffer to add to |
Artem Bityutskiy | 1e51764 | 2008-07-14 19:08:37 +0300 | [diff] [blame] | 1135 | * @inum: the inode number |
| 1136 | * |
| 1137 | * This function adds an inode number to the inode array of the write-buffer. |
| 1138 | */ |
| 1139 | void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum) |
| 1140 | { |
| 1141 | if (!wbuf->buf) |
| 1142 | /* NOR flash or something similar */ |
| 1143 | return; |
| 1144 | |
| 1145 | spin_lock(&wbuf->lock); |
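	/*
	 * Only record the inode if the buffer currently holds data;
	 * presumably an empty write-buffer has nothing of this inode left
	 * to synchronize, so tracking it would only cause needless syncs.
	 */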
| 1146 | if (wbuf->used) |
| 1147 | wbuf->inodes[wbuf->next_ino++] = inum; |
| 1148 | spin_unlock(&wbuf->lock); |
| 1149 | } |
| 1150 | |
| 1151 | /** |
| 1152 | * wbuf_has_ino - returns whether the wbuf contains data from the inode. |
| 1153 | * @wbuf: the write-buffer |
| 1154 | * @inum: the inode number |
| 1155 | * |
| 1156 | * This function returns %1 if the write-buffer contains some data from the |
| 1157 | * given inode, otherwise it returns %0. |
| 1158 | */ |
| 1159 | static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum) |
| 1160 | { |
| 1161 | int i, ret = 0; |
| 1162 | |
| 1163 | spin_lock(&wbuf->lock); |
| 1164 | for (i = 0; i < wbuf->next_ino; i++) |
| 1165 | if (inum == wbuf->inodes[i]) { |
| 1166 | ret = 1; |
| 1167 | break; |
| 1168 | } |
| 1169 | spin_unlock(&wbuf->lock); |
| 1170 | |
| 1171 | return ret; |
| 1172 | } |
| 1173 | |
| 1174 | /** |
| 1175 | * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode. |
| 1176 | * @c: UBIFS file-system description object |
| 1177 | * @inode: inode to synchronize |
| 1178 | * |
| 1179 | * This function synchronizes write-buffers which contain nodes belonging to |
| 1180 | * @inode. Returns zero in case of success and a negative error code in case of |
| 1181 | * failure. |
| 1182 | */ |
| 1183 | int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) |
| 1184 | { |
| 1185 | int i, err = 0; |
| 1186 | |
| 1187 | for (i = 0; i < c->jhead_cnt; i++) { |
| 1188 | struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; |
| 1189 | |
| 1190 | if (i == GCHD) |
| 1191 | /* |
| 1192 | * GC head is special, do not look at it. Even if the |
| 1193 | * head contains something related to this inode, it is |
| 1194 | * a _copy_ of the corresponding on-flash node which sits |
| 1195 | * somewhere else. |
| 1196 | */ |
| 1197 | continue; |
| 1198 | |
| 1199 | if (!wbuf_has_ino(wbuf, inode->i_ino)) |
| 1200 | continue; |
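		/*
		 * The unlocked check above is only an optimization; re-check
		 * under the head's io_mutex because the write-buffer may have
		 * been synchronized by somebody else in the meantime, in
		 * which case there is nothing left to flush for this inode.
		 */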
| 1201 | |
| 1202 | mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); |
| 1203 | if (wbuf_has_ino(wbuf, inode->i_ino)) |
| 1204 | err = ubifs_wbuf_sync_nolock(wbuf); |
| 1205 | mutex_unlock(&wbuf->io_mutex); |
| 1206 | |
| 1207 | if (err) { |
| 1208 | ubifs_ro_mode(c, err); |
| 1209 | return err; |
| 1210 | } |
| 1211 | } |
| 1212 | return 0; |
| 1213 | } |