/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2006, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

#ifndef __UBI_UBI_H__
#define __UBI_UBI_H__

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
#include <asm/pgtable.h>

#include "ubi-media.h"

/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32

/* UBI name used for character devices, sysfs, etc */
#define UBI_NAME_STR "ubi"

struct ubi_device;

/* Normal UBI messages */
__printf(2, 3)
void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);

/* UBI warning messages */
__printf(2, 3)
void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);

/* UBI error messages */
__printf(2, 3)
void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);

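/*
 * Usage sketch (illustrative only, not a definition from this header): the
 * helpers above behave like printf()-style wrappers which prefix messages
 * with the UBI device name. The variables used below (mtd, err, len, pnum)
 * are placeholders:
 *
 *        ubi_msg(ubi, "attaching mtd%d", mtd->index);
 *        if (err)
 *                ubi_err(ubi, "error %d while reading %d bytes from PEB %d",
 *                        err, len, pnum);
 */
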
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"

/*
 * This marker in the EBA table means that the LEB is un-mapped.
 * NOTE! It has to have the same value as %UBI_ALL.
 */
#define UBI_LEB_UNMAPPED -1

/*
 * In case of errors, UBI tries to repeat the operation several times before
 * returning an error. The below constant defines how many times UBI re-tries.
 */
#define UBI_IO_RETRIES 3

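/*
 * A minimal sketch of the retry pattern this constant is intended for;
 * do_io_op() is a placeholder, not a real UBI helper:
 *
 *        int i, err;
 *
 *        for (i = 0; i < UBI_IO_RETRIES; i++) {
 *                err = do_io_op();
 *                if (!err)
 *                        break;
 *        }
 */
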
/*
 * Length of the protection queue. The length is effectively equivalent to the
 * number of (global) erase cycles PEBs are protected from the wear-leveling
 * worker.
 */
#define UBI_PROT_QUEUE_LEN 10

/* The volume ID/LEB number/erase counter is unknown */
#define UBI_UNKNOWN -1

/*
 * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
 * + 2 for the number + 1 for the trailing zero byte).
 */
#define UBI_DFS_DIR_NAME "ubi%d"
#define UBI_DFS_DIR_LEN (3 + 2 + 1)

/*
 * Error codes returned by the I/O sub-system.
 *
 * UBI_IO_FF: the read region of flash contains only 0xFFs
 * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but there was also a data
 *                     integrity error reported by the MTD driver
 *                     (uncorrectable ECC error in case of NAND)
 * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
 * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but there was also a
 *                         data integrity error reported by the MTD driver
 *                         (uncorrectable ECC error in case of NAND)
 * UBI_IO_BITFLIPS: bit-flips were detected and corrected
 *
 * Note, it would probably be better to have the bit-flip and ebadmsg cases as
 * flags which could be or'ed with other error codes. But this is a big change
 * because there are many callers, so it is not worth the risk of introducing
 * a bug.
 */
enum {
        UBI_IO_FF = 1,
        UBI_IO_FF_BITFLIPS,
        UBI_IO_BAD_HDR,
        UBI_IO_BAD_HDR_EBADMSG,
        UBI_IO_BITFLIPS,
};

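/*
 * Hedged sketch of how a caller might act on these codes; read_hdr(),
 * use_peb(), add_to_free() and handle_bad_hdr() are placeholders. Zero and
 * %UBI_IO_BITFLIPS both mean the data is usable (the latter asks for
 * scrubbing), the %UBI_IO_FF* codes mean an empty region, and the remaining
 * codes mean a corrupted header:
 *
 *        err = read_hdr(ubi, pnum, hdr);
 *        switch (err) {
 *        case 0:
 *        case UBI_IO_BITFLIPS:
 *                return use_peb(pnum, err == UBI_IO_BITFLIPS);
 *        case UBI_IO_FF:
 *        case UBI_IO_FF_BITFLIPS:
 *                return add_to_free(pnum);
 *        default:
 *                return handle_bad_hdr(pnum, err);
 *        }
 */
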
/*
 * Return codes of the 'ubi_eba_copy_leb()' function.
 *
 * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
 *                   PEB was put meanwhile, or there is I/O on the source PEB
 * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
 *                     PEB
 * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
 *                     PEB
 * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
 *                     PEB
 * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
 *                       target PEB
 * MOVE_RETRY: retry scrubbing the PEB
 */
enum {
        MOVE_CANCEL_RACE = 1,
        MOVE_SOURCE_RD_ERR,
        MOVE_TARGET_RD_ERR,
        MOVE_TARGET_WR_ERR,
        MOVE_TARGET_BITFLIPS,
        MOVE_RETRY,
};

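/*
 * Hedged sketch of how the wear-leveling path might react to these values;
 * requeue_peb(), torture_peb() and mark_erroneous() are placeholders, and the
 * ubi_eba_copy_leb() call is assumed to match the prototype found further
 * down in this header:
 *
 *        err = ubi_eba_copy_leb(ubi, from, to, vidb);
 *        if (err == MOVE_CANCEL_RACE || err == MOVE_RETRY)
 *                requeue_peb(ubi, from);
 *        else if (err == MOVE_TARGET_WR_ERR || err == MOVE_TARGET_BITFLIPS)
 *                torture_peb(ubi, to);
 *        else if (err)
 *                mark_erroneous(ubi, from);
 */
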
/*
 * Return codes of the fastmap sub-system
 *
 * UBI_NO_FASTMAP: No fastmap super block was found
 * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
 */
enum {
        UBI_NO_FASTMAP = 1,
        UBI_BAD_FASTMAP,
};

/*
 * Flags for emulate_power_cut in ubi_debug_info
 *
 * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
 * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
 */
enum {
        POWER_CUT_EC_WRITE = 0x01,
        POWER_CUT_VID_WRITE = 0x02,
};

/**
 * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
 *                         flash.
 * @hdr: a pointer to the VID header stored in buffer
 * @buffer: underlying buffer
 */
struct ubi_vid_io_buf {
        struct ubi_vid_hdr *hdr;
        void *buffer;
};

/**
 * struct ubi_wl_entry - wear-leveling entry.
 * @u.rb: link in the corresponding (free/used) RB-tree
 * @u.list: link in the protection queue
 * @ec: erase counter
 * @pnum: physical eraseblock number
 *
 * This data structure is used in the WL sub-system. Each physical eraseblock
 * has a corresponding &struct ubi_wl_entry object which may be kept in
 * different RB-trees. See the WL sub-system for details.
 */
struct ubi_wl_entry {
        union {
                struct rb_node rb;
                struct list_head list;
        } u;
        int ec;
        int pnum;
};

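/*
 * Illustrative sketch (not UBI code) showing how an entry is recovered from
 * its RB-tree node via the @u.rb link; the free tree is kept sorted by erase
 * counter, so rb_first() yields a lowest-EC candidate:
 *
 *        struct rb_node *nd = rb_first(&ubi->free);
 *        struct ubi_wl_entry *e;
 *
 *        if (nd) {
 *                e = rb_entry(nd, struct ubi_wl_entry, u.rb);
 *                pr_debug("PEB %d has erase counter %d\n", e->pnum, e->ec);
 *        }
 */
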
/**
 * struct ubi_ltree_entry - an entry in the lock tree.
 * @rb: links RB-tree nodes
 * @vol_id: volume ID of the locked logical eraseblock
 * @lnum: locked logical eraseblock number
 * @users: how many tasks are using this logical eraseblock or waiting for it
 * @mutex: read/write mutex to implement read/write access serialization to
 *         the (@vol_id, @lnum) logical eraseblock
 *
 * This data structure is used in the EBA sub-system to implement per-LEB
 * locking. When a logical eraseblock is being locked, a corresponding
 * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
 * See the EBA sub-system for details.
 */
struct ubi_ltree_entry {
        struct rb_node rb;
        int vol_id;
        int lnum;
        int users;
        struct rw_semaphore mutex;
};

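/*
 * Simplified sketch of the per-LEB read-locking this entry enables; the real
 * lookup and @users refcounting happen under @ubi->ltree_lock in eba.c, and
 * lookup_ltree_entry() / read_the_leb() are placeholders:
 *
 *        struct ubi_ltree_entry *le = lookup_ltree_entry(ubi, vol_id, lnum);
 *
 *        down_read(&le->mutex);
 *        err = read_the_leb();
 *        up_read(&le->mutex);
 */
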
/**
 * struct ubi_rename_entry - volume re-name description data structure.
 * @new_name_len: new volume name length
 * @new_name: new volume name
 * @remove: if not zero, this volume should be removed, not re-named
 * @desc: descriptor of the volume
 * @list: links re-name entries into a list
 *
 * This data structure is utilized in the multiple volume re-name code. Namely,
 * UBI first creates a list of &struct ubi_rename_entry objects from the
 * &struct ubi_rnvol_req request object, and then utilizes this list to do all
 * the work.
 */
struct ubi_rename_entry {
        int new_name_len;
        char new_name[UBI_VOL_NAME_MAX + 1];
        int remove;
        struct ubi_volume_desc *desc;
        struct list_head list;
};

struct ubi_volume_desc;

/**
 * struct ubi_fastmap_layout - in-memory fastmap data structure.
 * @e: PEBs used by the current fastmap
 * @to_be_tortured: if non-zero, the corresponding PEB has to be tortured
 * @used_blocks: number of used PEBs
 * @max_pool_size: maximal size of the user pool
 * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
 */
struct ubi_fastmap_layout {
        struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
        int to_be_tortured[UBI_FM_MAX_BLOCKS];
        int used_blocks;
        int max_pool_size;
        int max_wl_pool_size;
};

/**
 * struct ubi_fm_pool - in-memory fastmap pool
 * @pebs: PEBs in this pool
 * @used: number of used PEBs
 * @size: total number of PEBs in this pool
 * @max_size: maximal size of the pool
 *
 * A pool gets filled with up to @max_size PEBs. If all PEBs within the pool
 * are used, a new fastmap is written to the flash and the pool is refilled
 * with empty PEBs.
 */
struct ubi_fm_pool {
        int pebs[UBI_FM_MAX_POOL_SIZE];
        int used;
        int size;
        int max_size;
};

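/*
 * Hedged sketch of how a PEB is taken from a pool, roughly mirroring what the
 * wear-leveling code does; refill_pool_and_retry() is a placeholder for the
 * fastmap-update path:
 *
 *        if (pool->used == pool->size)
 *                return refill_pool_and_retry(ubi, pool);
 *        return pool->pebs[pool->used++];
 */
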
/**
 * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor
 * @lnum: the logical eraseblock number
 * @pnum: the physical eraseblock where the LEB can be found
 *
 * This structure is here to hide EBA's internals from other parts of the
 * UBI implementation.
 *
 * One can query the position of a LEB by calling ubi_eba_get_ldesc().
 */
struct ubi_eba_leb_desc {
        int lnum;
        int pnum;
};

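/*
 * Usage sketch, assuming the ubi_eba_get_ldesc() prototype found further down
 * in this header:
 *
 *        struct ubi_eba_leb_desc ldesc;
 *
 *        ubi_eba_get_ldesc(vol, lnum, &ldesc);
 *        if (ldesc.pnum == UBI_LEB_UNMAPPED)
 *                pr_debug("LEB %d is not mapped\n", ldesc.lnum);
 *        else
 *                pr_debug("LEB %d lives in PEB %d\n", ldesc.lnum, ldesc.pnum);
 */
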
/**
 * struct ubi_volume - UBI volume description data structure.
 * @dev: device object to make use of the Linux device model
 * @cdev: character device object to create character device
 * @ubi: reference to the UBI device description object
 * @vol_id: volume ID
 * @ref_count: volume reference count
 * @readers: number of users holding this volume in read-only mode
 * @writers: number of users holding this volume in read-write mode
 * @exclusive: whether somebody holds this volume in exclusive mode
 * @metaonly: whether somebody is altering only meta data of this volume
 *
 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
 * @usable_leb_size: logical eraseblock size without padding
 * @used_ebs: how many logical eraseblocks in this volume contain data
 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
 * @used_bytes: how many bytes of data this volume contains
 * @alignment: volume alignment
 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
 *            satisfy the requested alignment
 * @name_len: volume name length
 * @name: volume name
 *
 * @upd_ebs: how many eraseblocks are expected to be updated
 * @ch_lnum: LEB number which is being changed by the atomic LEB change
 *           operation
 * @upd_bytes: how many bytes are expected to be received for volume update or
 *             atomic LEB change
 * @upd_received: how many bytes were already received for volume update or
 *                atomic LEB change
 * @upd_buf: update buffer which is used to collect update data or data for
 *           atomic LEB change
 *
 * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
 * @skip_check: %1 if CRC check of this static volume should be skipped.
 *              Directly reflects the presence of the
 *              %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry
 * @checked: %1 if this static volume was checked
 * @corrupted: %1 if the volume is corrupted (static volumes only)
 * @upd_marker: %1 if the update marker is set for this volume
 * @updating: %1 if the volume is being updated
 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
 * @direct_writes: %1 if direct writes are enabled for this volume
 *
 * @checkmap: bitmap to remember which PEB->LEB mappings got checked,
 *            protected by UBI LEB lock tree.
 *
 * The @corrupted field indicates that the volume's contents are corrupted.
 * Since UBI protects only static volumes, this field is not relevant to
 * dynamic volumes - it is the user's responsibility to ensure their data
 * integrity.
 *
 * The @upd_marker flag indicates that this volume is either being updated at
 * the moment or is damaged because of an unclean reboot.
 */
struct ubi_volume {
        struct device dev;
        struct cdev cdev;
        struct ubi_device *ubi;
        int vol_id;
        int ref_count;
        int readers;
        int writers;
        int exclusive;
        int metaonly;

        int reserved_pebs;
        int vol_type;
        int usable_leb_size;
        int used_ebs;
        int last_eb_bytes;
        long long used_bytes;
        int alignment;
        int data_pad;
        int name_len;
        char name[UBI_VOL_NAME_MAX + 1];

        int upd_ebs;
        int ch_lnum;
        long long upd_bytes;
        long long upd_received;
        void *upd_buf;

        struct ubi_eba_table *eba_tbl;
        unsigned int skip_check:1;
        unsigned int checked:1;
        unsigned int corrupted:1;
        unsigned int upd_marker:1;
        unsigned int updating:1;
        unsigned int changing_leb:1;
        unsigned int direct_writes:1;

#ifdef CONFIG_MTD_UBI_FASTMAP
        unsigned long *checkmap;
#endif
};

/**
 * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
 * @vol: reference to the corresponding volume description object
 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
 *        or %UBI_METAONLY)
 */
struct ubi_volume_desc {
        struct ubi_volume *vol;
        int mode;
};

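/*
 * Hedged sketch of how a kernel client obtains such a descriptor through the
 * public API from <linux/mtd/ubi.h>; the UBI/volume numbers are made up and
 * buf is a caller-provided buffer:
 *
 *        struct ubi_volume_desc *desc;
 *        int err;
 *
 *        desc = ubi_open_volume(0, 1, UBI_READONLY);
 *        if (IS_ERR(desc))
 *                return PTR_ERR(desc);
 *        err = ubi_leb_read(desc, 0, buf, 0, 64, 0);
 *        ubi_close_volume(desc);
 */
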
struct ubi_wl_entry;

/**
 * struct ubi_debug_info - debugging information for an UBI device.
 *
 * @chk_gen: if UBI general extra checks are enabled
 * @chk_io: if UBI I/O extra checks are enabled
 * @chk_fastmap: if UBI fastmap extra checks are enabled
 * @disable_bgt: disable the background task for testing purposes
 * @emulate_bitflips: emulate bit-flips for testing purposes
 * @emulate_io_failures: emulate write/erase failures for testing purposes
 * @emulate_power_cut: emulate power cut for testing purposes
 * @power_cut_counter: count down for writes left until emulated power cut
 * @power_cut_min: minimum number of writes before emulating a power cut
 * @power_cut_max: maximum number of writes until emulating a power cut
 * @dfs_dir_name: name of debugfs directory containing files of this UBI device
 * @dfs_dir: direntry object of the UBI device debugfs directory
 * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
 * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
 * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
 * @dfs_disable_bgt: debugfs knob to disable the background task
 * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
 * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
 * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
 * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
 * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
 */
struct ubi_debug_info {
        unsigned int chk_gen:1;
        unsigned int chk_io:1;
        unsigned int chk_fastmap:1;
        unsigned int disable_bgt:1;
        unsigned int emulate_bitflips:1;
        unsigned int emulate_io_failures:1;
        unsigned int emulate_power_cut:2;
        unsigned int power_cut_counter;
        unsigned int power_cut_min;
        unsigned int power_cut_max;
        char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
        struct dentry *dfs_dir;
        struct dentry *dfs_chk_gen;
        struct dentry *dfs_chk_io;
        struct dentry *dfs_chk_fastmap;
        struct dentry *dfs_disable_bgt;
        struct dentry *dfs_emulate_bitflips;
        struct dentry *dfs_emulate_io_failures;
        struct dentry *dfs_emulate_power_cut;
        struct dentry *dfs_power_cut_min;
        struct dentry *dfs_power_cut_max;
};

/**
 * struct ubi_device - UBI device description structure
 * @dev: UBI device object to use the Linux device model
 * @cdev: character device object to create character device
 * @ubi_num: UBI device number
 * @ubi_name: UBI device name
 * @vol_count: number of volumes in this UBI device
 * @volumes: volumes of this UBI device
 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
 *                @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
 *                @vol->readers, @vol->writers, @vol->exclusive,
 *                @vol->metaonly, @vol->ref_count, @vol->mapping and
 *                @vol->eba_tbl.
 * @ref_count: count of references on the UBI device
 * @image_seq: image sequence number recorded on EC headers
 *
 * @rsvd_pebs: count of reserved physical eraseblocks
 * @avail_pebs: count of available physical eraseblocks
 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
 *                 handling
 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
 *
 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
 *                     of UBI initialization
 * @vtbl_slots: how many slots are available in the volume table
 * @vtbl_size: size of the volume table in bytes
 * @vtbl: in-RAM volume table copy
 * @device_mutex: protects on-flash volume table and serializes volume
 *                creation, deletion, update, re-size, re-name and set
 *                property
 *
 * @max_ec: current highest erase counter value
 * @mean_ec: current mean erase counter value
 *
 * @global_sqnum: global sequence number
 * @ltree_lock: protects the lock tree and @global_sqnum
 * @ltree: the lock tree
 * @alc_mutex: serializes "atomic LEB change" operations
 *
 * @fm_disabled: non-zero if fastmap is disabled (default)
 * @fm: in-memory data structure of the currently used fastmap
 * @fm_pool: in-memory data structure of the fastmap pool
 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
 *              sub-system
 * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
 *              that critical sections cannot be interrupted by ubi_update_fastmap()
 * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
 * @fm_size: fastmap size in bytes
 * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
 * @fm_work: fastmap work queue
 * @fm_work_scheduled: non-zero if fastmap work was scheduled
 * @fast_attach: non-zero if UBI was attached by fastmap
 * @fm_anchor: The next anchor PEB to use for fastmap
 * @fm_do_produce_anchor: If true produce an anchor PEB in wl
 *
 * @used: RB-tree of used physical eraseblocks
 * @erroneous: RB-tree of erroneous used physical eraseblocks
 * @free: RB-tree of free physical eraseblocks
 * @free_count: Contains the number of elements in @free
 * @scrub: RB-tree of physical eraseblocks which need scrubbing
 * @pq: protection queue (contains physical eraseblocks which are temporarily
 *      protected from the wear-leveling worker)
 * @pq_head: protection queue head
 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
 *           @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
 *           @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
 *           and @fm_wl_pool fields
 * @move_mutex: serializes eraseblock moves
 * @work_sem: used to wait for all the scheduled works to finish and prevent
 *            new works from being submitted
 * @wl_scheduled: non-zero if the wear-leveling was scheduled
 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
 *             physical eraseblock
 * @move_from: physical eraseblock from where the data is being moved
 * @move_to: physical eraseblock where the data is being moved to
 * @move_to_put: if the "to" PEB was put
 * @works: list of pending works
 * @works_count: count of pending works
 * @bgt_thread: background thread description object
 * @thread_enabled: if the background thread is enabled
 * @bgt_name: background thread name
 *
 * @flash_size: underlying MTD device size (in bytes)
 * @peb_count: count of physical eraseblocks on the MTD device
 * @peb_size: physical eraseblock size
 * @bad_peb_limit: top limit of expected bad physical eraseblocks
 * @bad_peb_count: count of bad physical eraseblocks
 * @good_peb_count: count of good physical eraseblocks
 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
 *                  used by UBI)
 * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
 * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
 * @min_io_size: minimal input/output unit size of the underlying MTD device
 * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
 * @ro_mode: if the UBI device is in read-only mode
 * @leb_size: logical eraseblock size
 * @leb_start: starting offset of logical eraseblocks within physical
 *             eraseblocks
 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
 * @vid_hdr_offset: starting offset of the volume identifier header (might be
 *                  unaligned)
 * @vid_hdr_aloffset: starting offset of the VID header aligned to
 *                    @hdrs_min_io_size
 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
 * @bad_allowed: whether the MTD device admits bad physical eraseblocks or not
 * @nor_flash: non-zero if working on top of NOR flash
 * @max_write_size: maximum amount of bytes the underlying flash can write at a
 *                  time (MTD write buffer size)
 * @mtd: MTD device descriptor
 *
 * @peb_buf: a buffer of PEB size used for different purposes
 * @buf_mutex: protects @peb_buf
 * @ckvol_mutex: serializes static volume checking when opening
 *
 * @dbg: debugging information for this UBI device
 */
struct ubi_device {
        struct cdev cdev;
        struct device dev;
        int ubi_num;
        char ubi_name[sizeof(UBI_NAME_STR)+5];
        int vol_count;
        struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
        spinlock_t volumes_lock;
        int ref_count;
        int image_seq;

        int rsvd_pebs;
        int avail_pebs;
        int beb_rsvd_pebs;
        int beb_rsvd_level;
        int bad_peb_limit;

        int autoresize_vol_id;
        int vtbl_slots;
        int vtbl_size;
        struct ubi_vtbl_record *vtbl;
        struct mutex device_mutex;

        int max_ec;
        /* Note, mean_ec is not updated run-time - should be fixed */
        int mean_ec;

        /* EBA sub-system's stuff */
        unsigned long long global_sqnum;
        spinlock_t ltree_lock;
        struct rb_root ltree;
        struct mutex alc_mutex;

        /* Fastmap stuff */
        int fm_disabled;
        struct ubi_fastmap_layout *fm;
        struct ubi_fm_pool fm_pool;
        struct ubi_fm_pool fm_wl_pool;
        struct rw_semaphore fm_eba_sem;
        struct rw_semaphore fm_protect;
        void *fm_buf;
        size_t fm_size;
        struct work_struct fm_work;
        int fm_work_scheduled;
        int fast_attach;
        struct ubi_wl_entry *fm_anchor;
        int fm_do_produce_anchor;

        /* Wear-leveling sub-system's stuff */
        struct rb_root used;
        struct rb_root erroneous;
        struct rb_root free;
        int free_count;
        struct rb_root scrub;
        struct list_head pq[UBI_PROT_QUEUE_LEN];
        int pq_head;
        spinlock_t wl_lock;
        struct mutex move_mutex;
        struct rw_semaphore work_sem;
        int wl_scheduled;
        struct ubi_wl_entry **lookuptbl;
        struct ubi_wl_entry *move_from;
        struct ubi_wl_entry *move_to;
        int move_to_put;
        struct list_head works;
        int works_count;
        struct task_struct *bgt_thread;
        int thread_enabled;
        char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];

        /* I/O sub-system's stuff */
        long long flash_size;
        int peb_count;
        int peb_size;
        int bad_peb_count;
        int good_peb_count;
        int corr_peb_count;
        int erroneous_peb_count;
        int max_erroneous;
        int min_io_size;
        int hdrs_min_io_size;
        int ro_mode;
        int leb_size;
        int leb_start;
        int ec_hdr_alsize;
        int vid_hdr_alsize;
        int vid_hdr_offset;
        int vid_hdr_aloffset;
        int vid_hdr_shift;
        unsigned int bad_allowed:1;
        unsigned int nor_flash:1;
        int max_write_size;
        struct mtd_info *mtd;

        void *peb_buf;
        struct mutex buf_mutex;
        struct mutex ckvol_mutex;

        struct ubi_debug_info dbg;
};

/**
 * struct ubi_ainf_peb - attach information about a physical eraseblock.
 * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
 * @pnum: physical eraseblock number
 * @vol_id: ID of the volume this LEB belongs to
 * @lnum: logical eraseblock number
 * @scrub: if this physical eraseblock needs scrubbing
 * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
 * @sqnum: sequence number
 * @u: union of the RB-tree and list links
 * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
 * @u.list: link in one of the eraseblock lists
 *
 * One object of this type is allocated for each physical eraseblock when
 * attaching an MTD device. Note, if this PEB does not belong to any LEB /
 * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
 */
struct ubi_ainf_peb {
        int ec;
        int pnum;
        int vol_id;
        int lnum;
        unsigned int scrub:1;
        unsigned int copy_flag:1;
        unsigned long long sqnum;
        union {
                struct rb_node rb;
                struct list_head list;
        } u;
};

/**
 * struct ubi_ainf_volume - attaching information about a volume.
 * @vol_id: volume ID
 * @highest_lnum: highest logical eraseblock number in this volume
 * @leb_count: number of logical eraseblocks in this volume
 * @vol_type: volume type
 * @used_ebs: number of used logical eraseblocks in this volume (only for
 *            static volumes)
 * @last_data_size: amount of data in the last logical eraseblock of this
 *                  volume (always equivalent to the usable logical eraseblock
 *                  size in case of dynamic volumes)
 * @data_pad: how many bytes at the end of logical eraseblocks of this volume
 *            are not used (due to volume alignment)
 * @compat: compatibility flags of this volume
 * @rb: link in the volume RB-tree
 * @root: root of the RB-tree containing all the eraseblocks belonging to this
 *        volume (&struct ubi_ainf_peb objects)
 *
 * One object of this type is allocated for each volume when attaching an MTD
 * device.
 */
struct ubi_ainf_volume {
        int vol_id;
        int highest_lnum;
        int leb_count;
        int vol_type;
        int used_ebs;
        int last_data_size;
        int data_pad;
        int compat;
        struct rb_node rb;
        struct rb_root root;
};

/**
 * struct ubi_attach_info - MTD device attaching information.
 * @volumes: root of the volume RB-tree
 * @corr: list of corrupted physical eraseblocks
 * @free: list of free physical eraseblocks
 * @erase: list of physical eraseblocks which have to be erased
 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
 *         those belonging to "preserve"-compatible internal volumes)
 * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
 *           eraseblocks of the current and not yet erased old fastmap blocks)
 * @corr_peb_count: count of PEBs in the @corr list
 * @empty_peb_count: count of PEBs which are presumably empty (contain only
 *                   0xFF bytes)
 * @alien_peb_count: count of PEBs in the @alien list
 * @bad_peb_count: count of bad physical eraseblocks
 * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
 *                       as bad yet, but which look like they are bad
 * @vols_found: number of volumes found
 * @highest_vol_id: highest volume ID
 * @is_empty: flag indicating whether the MTD device is empty or not
 * @force_full_scan: flag indicating whether we need to do a full scan and drop
 *                   all existing Fastmap data structures
 * @min_ec: lowest erase counter value
 * @max_ec: highest erase counter value
 * @max_sqnum: highest sequence number value
 * @mean_ec: mean erase counter value
 * @ec_sum: a temporary variable used when calculating @mean_ec
 * @ec_count: a temporary variable used when calculating @mean_ec
 * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
 * @ech: temporary EC header. Only available during scan
 * @vidb: temporary VID buffer. Only available during scan
 *
 * This data structure contains the result of attaching an MTD device and may
 * be used by other UBI sub-systems to build final UBI data structures, further
 * error-recovery and so on.
 */
struct ubi_attach_info {
        struct rb_root volumes;
        struct list_head corr;
        struct list_head free;
        struct list_head erase;
        struct list_head alien;
        struct list_head fastmap;
        int corr_peb_count;
        int empty_peb_count;
        int alien_peb_count;
        int bad_peb_count;
        int maybe_bad_peb_count;
        int vols_found;
        int highest_vol_id;
        int is_empty;
        int force_full_scan;
        int min_ec;
        int max_ec;
        unsigned long long max_sqnum;
        int mean_ec;
        uint64_t ec_sum;
        int ec_count;
        struct kmem_cache *aeb_slab_cache;
        struct ubi_ec_hdr *ech;
        struct ubi_vid_io_buf *vidb;
};

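/*
 * Hedged sketch of how attach-time code walks these lists; the @u.list link
 * of &struct ubi_ainf_peb is the list anchor, and schedule_erase_for() is a
 * placeholder:
 *
 *        struct ubi_ainf_peb *aeb, *tmp;
 *
 *        list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list)
 *                schedule_erase_for(ubi, aeb->pnum, aeb->ec);
 */
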
/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @vol_id: the volume ID on which this erasure is being performed
 * @lnum: the logical eraseblock number
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @shutdown argument is
 * not zero, the worker has to free the resources and exit immediately as the
 * WL sub-system is shutting down.
 * The worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
        struct list_head list;
        int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
        /* The below fields are only relevant to erasure works */
        struct ubi_wl_entry *e;
        int vol_id;
        int lnum;
        int torture;
};

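/*
 * Hedged skeleton of a worker matching the @func signature above; erase_one()
 * stands in for the real erase logic:
 *
 *        static int erase_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 *                                int shutdown)
 *        {
 *                if (shutdown) {
 *                        kfree(wrk);
 *                        return 0;
 *                }
 *                return erase_one(ubi, wrk->e, wrk->torture);
 *        }
 */
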
#include "debug.h"

extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
extern struct class ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;

/* attach.c */
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
                                   int ec);
void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb);
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
                  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id);
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
                                    int vol_id);
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
                                       struct ubi_attach_info *ai);
int ubi_attach(struct ubi_device *ubi, int force_scan);
void ubi_destroy_ai(struct ubi_attach_info *ai);

/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
                           struct ubi_vtbl_record *vtbl_rec);
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
                            struct list_head *rename_list);
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);

/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);

/* upd.c */
int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                     long long bytes);
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
                         const void __user *buf, int count);
int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
                         const struct ubi_leb_change_req *req);
int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
                             const void __user *buf, int count);

/* misc.c */
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
                      int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);

static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
{
        return lnum >= 0 && lnum < vol->reserved_pebs;
}
| 875 | |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 876 | /* eba.c */ |
Boris Brezillon | 799dca3 | 2016-09-16 16:59:25 +0200 | [diff] [blame] | 877 | struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol, |
| 878 | int nentries); |
| 879 | void ubi_eba_destroy_table(struct ubi_eba_table *tbl); |
| 880 | void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst, |
| 881 | int nentries); |
| 882 | void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl); |
Boris Brezillon | 1f81a5c | 2016-09-16 16:59:24 +0200 | [diff] [blame] | 883 | void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum, |
| 884 | struct ubi_eba_leb_desc *ldesc); |
Boris Brezillon | 7554769 | 2016-09-16 16:59:23 +0200 | [diff] [blame] | 885 | bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum); |
Artem Bityutskiy | 89b96b6 | 2007-12-16 20:00:38 +0200 | [diff] [blame] | 886 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, |
| 887 | int lnum); |
| 888 | int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
| 889 | void *buf, int offset, int len, int check); |
Richard Weinberger | 9ff0897 | 2015-01-10 22:52:13 +0100 | [diff] [blame] | 890 | int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol, |
| 891 | struct ubi_sgl *sgl, int lnum, int offset, int len, |
| 892 | int check); |
Artem Bityutskiy | 89b96b6 | 2007-12-16 20:00:38 +0200 | [diff] [blame] | 893 | int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, |
Richard Weinberger | b36a261 | 2012-05-14 17:55:51 +0200 | [diff] [blame] | 894 | const void *buf, int offset, int len); |
Artem Bityutskiy | 89b96b6 | 2007-12-16 20:00:38 +0200 | [diff] [blame] | 895 | int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, |
Richard Weinberger | b36a261 | 2012-05-14 17:55:51 +0200 | [diff] [blame] | 896 | int lnum, const void *buf, int len, int used_ebs); |
Artem Bityutskiy | 89b96b6 | 2007-12-16 20:00:38 +0200 | [diff] [blame] | 897 | int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, |
Richard Weinberger | b36a261 | 2012-05-14 17:55:51 +0200 | [diff] [blame] | 898 | int lnum, const void *buf, int len); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 899 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 900 | struct ubi_vid_io_buf *vidb); |
Artem Bityutskiy | 41e0cd9 | 2012-05-17 21:05:33 +0300 | [diff] [blame] | 901 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
Richard Weinberger | 5638b33 | 2012-09-26 17:51:42 +0200 | [diff] [blame] | 902 | unsigned long long ubi_next_sqnum(struct ubi_device *ubi); |
| 903 | int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, |
| 904 | struct ubi_attach_info *ai_scan); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 905 | |
| 906 | /* wl.c */ |
Richard Weinberger | b36a261 | 2012-05-14 17:55:51 +0200 | [diff] [blame] | 907 | int ubi_wl_get_peb(struct ubi_device *ubi); |
Joel Reardon | d36e59e | 2012-05-18 15:40:24 +0200 | [diff] [blame] | 908 | int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, |
| 909 | int pnum, int torture); |
Joel Reardon | 62f38455 | 2012-05-20 21:27:11 +0200 | [diff] [blame] | 910 | int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 911 | int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); |
Artem Bityutskiy | 41e0cd9 | 2012-05-17 21:05:33 +0300 | [diff] [blame] | 912 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 913 | void ubi_wl_close(struct ubi_device *ubi); |
Artem Bityutskiy | cdfa788 | 2007-12-17 20:33:20 +0200 | [diff] [blame] | 914 | int ubi_thread(void *u); |
Richard Weinberger | 5638b33 | 2012-09-26 17:51:42 +0200 | [diff] [blame] | 915 | struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor); |
| 916 | int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, |
| 917 | int lnum, int torture); |
| 918 | int ubi_is_erase_work(struct ubi_work *wrk); |
| 919 | void ubi_refill_pools(struct ubi_device *ubi); |
| 920 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi); |
Richard Weinberger | 663586c | 2018-11-07 23:16:19 +0100 | [diff] [blame] | 921 | int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 922 | |
| 923 | /* io.c */ |
| 924 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, |
| 925 | int len); |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 926 | int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, |
| 927 | int len); |
| 928 | int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 929 | int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); |
| 930 | int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 931 | int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 932 | struct ubi_ec_hdr *ec_hdr, int verbose); |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 933 | int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 934 | struct ubi_ec_hdr *ec_hdr); |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 935 | int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 936 | struct ubi_vid_io_buf *vidb, int verbose); |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 937 | int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 938 | struct ubi_vid_io_buf *vidb); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 939 | |
Artem Bityutskiy | e73f445 | 2007-12-17 17:37:26 +0200 | [diff] [blame] | 940 | /* build.c */ |
Richard Genoud | 256334c | 2012-08-20 18:00:11 +0200 | [diff] [blame] | 941 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
| 942 | int vid_hdr_offset, int max_beb_per1024); |
Artem Bityutskiy | cdfa788 | 2007-12-17 20:33:20 +0200 | [diff] [blame] | 943 | int ubi_detach_mtd_dev(int ubi_num, int anyway); |
Artem Bityutskiy | e73f445 | 2007-12-17 17:37:26 +0200 | [diff] [blame] | 944 | struct ubi_device *ubi_get_device(int ubi_num); |
| 945 | void ubi_put_device(struct ubi_device *ubi); |
| 946 | struct ubi_device *ubi_get_by_major(int major); |
| 947 | int ubi_major2num(int major); |
Dmitry Pervushin | 0e0ee1c | 2009-04-29 19:29:38 +0400 | [diff] [blame] | 948 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, |
| 949 | int ntype); |
| 950 | int ubi_notify_all(struct ubi_device *ubi, int ntype, |
| 951 | struct notifier_block *nb); |
| 952 | int ubi_enumerate_volumes(struct notifier_block *nb); |
Artem Bityutskiy | 47e1ec7 | 2012-05-18 12:41:17 +0300 | [diff] [blame] | 953 | void ubi_free_internal_volumes(struct ubi_device *ubi); |
Dmitry Pervushin | 0e0ee1c | 2009-04-29 19:29:38 +0400 | [diff] [blame] | 954 | |
| 955 | /* kapi.c */ |
| 956 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); |
| 957 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, |
| 958 | struct ubi_volume_info *vi); |
Richard Weinberger | 5638b33 | 2012-09-26 17:51:42 +0200 | [diff] [blame] | 959 | /* scan.c */ |
| 960 | int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, |
| 961 | int pnum, const struct ubi_vid_hdr *vid_hdr); |
| 962 | |
| 963 | /* fastmap.c */ |
Richard Weinberger | 5e0246e | 2014-10-06 15:12:16 +0200 | [diff] [blame] | 964 | #ifdef CONFIG_MTD_UBI_FASTMAP |
Richard Weinberger | 5638b33 | 2012-09-26 17:51:42 +0200 | [diff] [blame] | 965 | size_t ubi_calc_fm_size(struct ubi_device *ubi); |
| 966 | int ubi_update_fastmap(struct ubi_device *ubi); |
| 967 | int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, |
Richard Weinberger | fdf10ed | 2016-06-14 10:12:15 +0200 | [diff] [blame] | 968 | struct ubi_attach_info *scan_ai); |
Richard Weinberger | 34653fd | 2018-05-28 22:04:33 +0200 | [diff] [blame] | 969 | int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count); |
| 970 | void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol); |
Richard Weinberger | 5e0246e | 2014-10-06 15:12:16 +0200 | [diff] [blame] | 971 | #else |
| 972 | static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; } |
Rishi Gupta | 0997187 | 2019-09-19 07:08:18 +0530 | [diff] [blame] | 973 | static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; } |
Richard Weinberger | 34653fd | 2018-05-28 22:04:33 +0200 | [diff] [blame] | 974 | static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {} |
Richard Weinberger | 5e0246e | 2014-10-06 15:12:16 +0200 | [diff] [blame] | 975 | #endif |
Artem Bityutskiy | e73f445 | 2007-12-17 17:37:26 +0200 | [diff] [blame] | 976 | |
Ezequiel Garcia | 9d54c8a | 2014-02-25 13:25:22 -0300 | [diff] [blame] | 977 | /* block.c */ |
| 978 | #ifdef CONFIG_MTD_UBI_BLOCK |
| 979 | int ubiblock_init(void); |
| 980 | void ubiblock_exit(void); |
Artem Bityutskiy | 4d283ee | 2014-03-04 12:00:26 +0200 | [diff] [blame] | 981 | int ubiblock_create(struct ubi_volume_info *vi); |
| 982 | int ubiblock_remove(struct ubi_volume_info *vi); |
Ezequiel Garcia | 9d54c8a | 2014-02-25 13:25:22 -0300 | [diff] [blame] | 983 | #else |
| 984 | static inline int ubiblock_init(void) { return 0; } |
| 985 | static inline void ubiblock_exit(void) {} |
Artem Bityutskiy | 4d283ee | 2014-03-04 12:00:26 +0200 | [diff] [blame] | 986 | static inline int ubiblock_create(struct ubi_volume_info *vi) |
| 987 | { |
Ezequiel Garcia | 80744cc | 2014-03-04 07:57:43 -0300 | [diff] [blame] | 988 | return -ENOSYS; |
Artem Bityutskiy | 4d283ee | 2014-03-04 12:00:26 +0200 | [diff] [blame] | 989 | } |
| 990 | static inline int ubiblock_remove(struct ubi_volume_info *vi) |
| 991 | { |
Ezequiel Garcia | 80744cc | 2014-03-04 07:57:43 -0300 | [diff] [blame] | 992 | return -ENOSYS; |
Artem Bityutskiy | 4d283ee | 2014-03-04 12:00:26 +0200 | [diff] [blame] | 993 | } |
Ezequiel Garcia | 9d54c8a | 2014-02-25 13:25:22 -0300 | [diff] [blame] | 994 | #endif |
| 995 | |
Richard Weinberger | 23c482e | 2014-10-28 16:18:42 +0100 | [diff] [blame] | 996 | /* |
| 997 | * ubi_for_each_free_peb - walk the UBI free RB tree. |
| 998 | * @ubi: UBI device description object |
| 999 | * @e: a pointer to a ubi_wl_entry to use as cursor |
| 1000 | * @tmp_rb: a pointer to a struct rb_node to use as a loop counter |
| 1001 | */ |
| 1002 | #define ubi_for_each_free_peb(ubi, e, tmp_rb) \ |
| 1003 | ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb) |
| 1004 | |
| 1005 | /* |
| 1006 | * ubi_for_each_used_peb - walk the UBI used RB tree. |
| 1007 | * @ubi: UBI device description object |
| 1008 | * @e: a pointer to a ubi_wl_entry to use as cursor |
| 1009 | * @tmp_rb: a pointer to a struct rb_node to use as a loop counter |
| 1010 | */ |
| 1011 | #define ubi_for_each_used_peb(ubi, e, tmp_rb) \ |
| 1012 | ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb) |
| 1013 | |
| 1014 | /* |
| 1015 | * ubi_for_each_scrub_peb - walk the UBI scrub RB tree. |
| 1016 | * @ubi: UBI device description object |
| 1017 | * @e: a pointer to a ubi_wl_entry to use as cursor |
| 1018 | * @tmp_rb: a pointer to a struct rb_node to use as a loop counter |
| 1019 | */ |
| 1020 | #define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \ |
| 1021 | ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb) |
| 1022 | |
| 1023 | /* |
| 1024 | * ubi_for_each_protected_peb - walk the UBI protection queue. |
| 1025 | * @ubi: UBI device description object |
| 1026 | * @i: an integer used as a counter |
| 1027 | * @e: a pointer to a ubi_wl_entry to use as cursor |
| 1028 | */ |
| 1029 | #define ubi_for_each_protected_peb(ubi, i, e) \ |
| 1030 | for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \ |
| 1031 | list_for_each_entry((e), &(ubi->pq[(i)]), u.list) |
Ezequiel Garcia | 9d54c8a | 2014-02-25 13:25:22 -0300 | [diff] [blame] | 1032 | |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1033 | /* |
| 1034 | * ubi_rb_for_each_entry - walk an RB-tree. |
Anand Gadiyar | fd589a8 | 2009-07-16 17:13:03 +0200 | [diff] [blame] | 1035 | * @rb: a pointer to type 'struct rb_node' to use as a loop counter |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1036 | * @pos: a pointer to RB-tree entry type to use as a loop counter |
| 1037 | * @root: RB-tree's root |
| 1038 | * @member: the name of the 'struct rb_node' within the RB-tree entry |
| 1039 | */ |
| 1040 | #define ubi_rb_for_each_entry(rb, pos, root, member) \ |
| 1041 | for (rb = rb_first(root), \ |
| 1042 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ |
| 1043 | rb; \ |
Phil Carmody | 758d8e4 | 2009-07-23 15:29:10 +0200 | [diff] [blame] | 1044 | rb = rb_next(rb), \ |
| 1045 | pos = (rb ? container_of(rb, typeof(*pos), member) : NULL)) |
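/*
 * Illustrative sketch (not part of UBI): walking the free RB-tree with the
 * helpers above. The function name is hypothetical; a real caller would hold
 * the wear-leveling spinlock (@ubi->wl_lock) while iterating @ubi->free.
 */
#if 0	/* example only, never compiled */
static int example_count_free_pebs(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct rb_node *tmp_rb;
	int count = 0;

	ubi_for_each_free_peb(ubi, e, tmp_rb)
		count += 1;

	return count;
}
#endif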
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1046 | |
Artem Bityutskiy | 0479ab4 | 2012-05-18 13:00:10 +0300 | [diff] [blame] | 1047 | /* |
| 1048 | * ubi_move_aeb_to_list - move a PEB from the volume tree to a list. |
| 1049 | * |
| 1050 | * @av: volume attaching information |
| 1051 | * @aeb: attaching eraseblock information |
| 1052 | * @list: the list to move to |
| 1053 | */ |
| 1054 | static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av, |
| 1055 | struct ubi_ainf_peb *aeb, |
| 1056 | struct list_head *list) |
| 1057 | { |
| 1058 | rb_erase(&aeb->u.rb, &av->root); |
| 1059 | list_add_tail(&aeb->u.list, list); |
| 1060 | } |
| 1061 | |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1062 | /** |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1063 | * ubi_init_vid_buf - Initialize a VID buffer |
| 1064 | * @ubi: the UBI device |
| 1065 | * @vidb: the VID buffer to initialize |
| 1066 | * @buf: the underlying buffer |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1067 | */ |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1068 | static inline void ubi_init_vid_buf(const struct ubi_device *ubi, |
| 1069 | struct ubi_vid_io_buf *vidb, |
| 1070 | void *buf) |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1071 | { |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1072 | if (buf) |
| 1073 | memset(buf, 0, ubi->vid_hdr_alsize); |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1074 | |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1075 | vidb->buffer = buf; |
| 1076 | vidb->hdr = buf + ubi->vid_hdr_shift; |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1077 | } |
| 1078 | |
| 1079 | /** |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1080 | * ubi_alloc_vid_buf - Allocate a VID buffer |
| 1081 | * @ubi: the UBI device |
| 1082 | * @gfp_flags: GFP flags to use for the allocation |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1083 | */ |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1084 | static inline struct ubi_vid_io_buf * |
| 1085 | ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags) |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1086 | { |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1087 | struct ubi_vid_io_buf *vidb; |
| 1088 | void *buf; |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1089 | |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1090 | vidb = kzalloc(sizeof(*vidb), gfp_flags); |
| 1091 | if (!vidb) |
| 1092 | return NULL; |
| 1093 | |
| 1094 | buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags); |
| 1095 | if (!buf) { |
| 1096 | kfree(vidb); |
| 1097 | return NULL; |
| 1098 | } |
| 1099 | |
| 1100 | ubi_init_vid_buf(ubi, vidb, buf); |
| 1101 | |
| 1102 | return vidb; |
| 1103 | } |
| 1104 | |
| 1105 | /** |
| 1106 | * ubi_free_vid_buf - Free a VID buffer |
| 1107 | * @vidb: the VID buffer to free |
| 1108 | */ |
| 1109 | static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb) |
| 1110 | { |
| 1111 | if (!vidb) |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1112 | return; |
| 1113 | |
Boris Brezillon | 3291b52 | 2016-09-16 16:59:26 +0200 | [diff] [blame] | 1114 | kfree(vidb->buffer); |
| 1115 | kfree(vidb); |
| 1116 | } |
| 1117 | |
| 1118 | /** |
| 1119 | * ubi_get_vid_hdr - Get the VID header attached to a VID buffer |
| 1120 | * @vidb: VID buffer |
| 1121 | */ |
| 1122 | static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb) |
| 1123 | { |
| 1124 | return vidb->hdr; |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1125 | } |
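/*
 * Illustrative sketch (not part of UBI): typical life cycle of a VID buffer.
 * The function name is hypothetical; the buffer is allocated, handed to the
 * I/O layer to read the VID header of a PEB, inspected and freed again.
 */
#if 0	/* example only, never compiled */
static int example_print_vol_id(struct ubi_device *ubi, int pnum)
{
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int err;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
	if (!err) {
		vid_hdr = ubi_get_vid_hdr(vidb);
		ubi_msg(ubi, "PEB %d belongs to volume %d",
			pnum, be32_to_cpu(vid_hdr->vol_id));
	}

	ubi_free_vid_buf(vidb);
	return err;
}
#endif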
| 1126 | |
| 1127 | /* |
| 1128 | * This function is equivalent to 'ubi_io_read()', but @offset is relative to |
| 1129 | * the beginning of the logical eraseblock, not to the beginning of the |
| 1130 | * physical eraseblock. |
| 1131 | */ |
| 1132 | static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, |
| 1133 | int pnum, int offset, int len) |
| 1134 | { |
| 1135 | ubi_assert(offset >= 0); |
| 1136 | return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); |
| 1137 | } |
| 1138 | |
| 1139 | /* |
| 1140 | * This function is equivalent to 'ubi_io_write()', but @offset is relative to |
| 1141 | * the beginning of the logical eraseblock, not to the beginning of the |
| 1142 | * physical eraseblock. |
| 1143 | */ |
Artem Bityutskiy | e88d6e10 | 2007-08-29 14:51:52 +0300 | [diff] [blame] | 1144 | static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1145 | int pnum, int offset, int len) |
| 1146 | { |
| 1147 | ubi_assert(offset >= 0); |
| 1148 | return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); |
| 1149 | } |
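/*
 * Illustrative sketch (not part of UBI): because @offset in the two wrappers
 * above is relative to the logical eraseblock data area, offset zero addresses
 * the first data byte of the PEB, right after the EC and VID headers. The
 * function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_read_leb_start(struct ubi_device *ubi, void *buf, int pnum)
{
	/* Read the first @ubi->min_io_size bytes of the data area of @pnum */
	return ubi_io_read_data(ubi, buf, pnum, 0, ubi->min_io_size);
}
#endif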
| 1150 | |
| 1151 | /** |
| 1152 | * ubi_ro_mode - switch to read-only mode. |
| 1153 | * @ubi: UBI device description object |
| 1154 | */ |
| 1155 | static inline void ubi_ro_mode(struct ubi_device *ubi) |
| 1156 | { |
Artem Bityutskiy | 43f9b25 | 2007-12-18 15:06:55 +0200 | [diff] [blame] | 1157 | if (!ubi->ro_mode) { |
| 1158 | ubi->ro_mode = 1; |
Tanya Brokhman | 32608703 | 2014-10-20 19:57:00 +0300 | [diff] [blame] | 1159 | ubi_warn(ubi, "switch to read-only mode"); |
Artem Bityutskiy | 25886a3 | 2012-04-24 06:59:49 +0300 | [diff] [blame] | 1160 | dump_stack(); |
Artem Bityutskiy | 43f9b25 | 2007-12-18 15:06:55 +0200 | [diff] [blame] | 1161 | } |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1162 | } |
| 1163 | |
| 1164 | /** |
| 1165 | * vol_id2idx - get table index by volume ID. |
| 1166 | * @ubi: UBI device description object |
| 1167 | * @vol_id: volume ID |
| 1168 | */ |
| 1169 | static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) |
| 1170 | { |
| 1171 | if (vol_id >= UBI_INTERNAL_VOL_START) |
| 1172 | return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; |
| 1173 | else |
| 1174 | return vol_id; |
| 1175 | } |
| 1176 | |
| 1177 | /** |
| 1178 | * idx2vol_id - get volume ID by table index. |
| 1179 | * @ubi: UBI device description object |
| 1180 | * @idx: table index |
| 1181 | */ |
| 1182 | static inline int idx2vol_id(const struct ubi_device *ubi, int idx) |
| 1183 | { |
| 1184 | if (idx >= ubi->vtbl_slots) |
| 1185 | return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; |
| 1186 | else |
| 1187 | return idx; |
| 1188 | } |
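/*
 * Illustrative sketch (not part of UBI): the two helpers above translate
 * between volume IDs and volume-table indexes by a plain offset, so internal
 * volumes are appended right after the @vtbl_slots user slots, and the
 * mapping is reversible. The function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_vol_id_mapping(const struct ubi_device *ubi)
{
	int idx = vol_id2idx(ubi, UBI_INTERNAL_VOL_START);

	/* The first internal volume lands right after the user slots */
	ubi_assert(idx == ubi->vtbl_slots);
	ubi_assert(idx2vol_id(ubi, idx) == UBI_INTERNAL_VOL_START);
}
#endif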
| 1189 | |
Richard Weinberger | 243a4f8 | 2016-06-14 10:12:13 +0200 | [diff] [blame] | 1190 | /** |
| 1191 | * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume. |
| 1192 | * @vol_id: volume ID |
| 1193 | */ |
| 1194 | static inline bool ubi_is_fm_vol(int vol_id) |
| 1195 | { |
| 1196 | switch (vol_id) { |
| 1197 | case UBI_FM_SB_VOLUME_ID: |
| 1198 | case UBI_FM_DATA_VOLUME_ID: |
| 1199 | return true; |
| 1200 | } |
| 1201 | |
| 1202 | return false; |
| 1203 | } |
| 1204 | |
Richard Weinberger | fdf10ed | 2016-06-14 10:12:15 +0200 | [diff] [blame] | 1205 | /** |
| 1206 | * ubi_find_fm_block - check whether a PEB is part of the current Fastmap. |
| 1207 | * @ubi: UBI device description object |
| 1208 | * @pnum: physical eraseblock to look for |
| 1209 | * |
| 1210 | * This function returns a wear leveling object if @pnum relates to the current |
| 1211 | * fastmap, %NULL otherwise. |
| 1212 | */ |
| 1213 | static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi, |
| 1214 | int pnum) |
| 1215 | { |
| 1216 | int i; |
| 1217 | |
| 1218 | if (ubi->fm) { |
| 1219 | for (i = 0; i < ubi->fm->used_blocks; i++) { |
| 1220 | if (ubi->fm->e[i]->pnum == pnum) |
| 1221 | return ubi->fm->e[i]; |
| 1222 | } |
| 1223 | } |
| 1224 | |
| 1225 | return NULL; |
| 1226 | } |
| 1227 | |
Artem B. Bityutskiy | 801c135 | 2006-06-27 12:22:22 +0400 | [diff] [blame] | 1228 | #endif /* !__UBI_UBI_H__ */ |