blob: 4d4449e1df3d5a8145447834a051119fa416197b [file] [log] [blame]
Namjae Jeon9273f3d2020-02-03 21:47:19 +09001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
4 */
5
Namjae Jeonf53d47c2020-02-04 09:18:10 +09006#include <linux/version.h>
Namjae Jeon9273f3d2020-02-03 21:47:19 +09007#include <linux/init.h>
8#include <linux/buffer_head.h>
9#include <linux/mpage.h>
10#include <linux/bio.h>
11#include <linux/blkdev.h>
12#include <linux/time.h>
13#include <linux/writeback.h>
14#include <linux/uio.h>
15#include <linux/random.h>
Namjae Jeon4e3295f2020-02-08 19:13:34 +090016#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
Namjae Jeon9273f3d2020-02-03 21:47:19 +090017#include <linux/iversion.h>
Namjae Jeonf53d47c2020-02-04 09:18:10 +090018#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +090019#include "exfat_raw.h"
20#include "exfat_fs.h"
21
/*
 * Write the in-core inode state back to its on-disk directory entry set.
 *
 * @sync: non-zero requests synchronous writeback of the dentry buffers.
 *
 * Returns 0 on success (or when nothing needs writing), negative errno
 * on failure.  Caller is expected to hold sbi->s_lock (see callers
 * exfat_write_inode()/exfat_sync_inode()).
 */
int __exfat_write_inode(struct inode *inode, int sync)
{
	unsigned long long on_disk_size;
	struct exfat_dentry *ep, *ep2;
	struct exfat_entry_set_cache es;
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	bool is_dir = (ei->type == TYPE_DIR) ? true : false;

	/* The root directory has no directory entry of its own to update. */
	if (inode->i_ino == EXFAT_ROOT_INO)
		return 0;

	/*
	 * If the inode is already unlinked, there is no need for updating it.
	 */
	if (ei->dir.dir == DIR_DELETED)
		return 0;

	if (is_dir && ei->dir.dir == sbi->root_dir && ei->entry == -1)
		return 0;

	exfat_set_volume_dirty(sb);

	/* get the directory entry of given file or directory */
	if (exfat_get_dentry_set(&es, sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES))
		return -EIO;
	ep = exfat_get_dentry_cached(&es, ES_IDX_FILE);
	ep2 = exfat_get_dentry_cached(&es, ES_IDX_STREAM);

	ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));

	/* set FILE_INFO structure using the acquired struct exfat_dentry */
	exfat_set_entry_time(sbi, &ei->i_crtime,
			&ep->dentry.file.create_tz,
			&ep->dentry.file.create_time,
			&ep->dentry.file.create_date,
			&ep->dentry.file.create_time_cs);
	exfat_set_entry_time(sbi, &inode->i_mtime,
			&ep->dentry.file.modify_tz,
			&ep->dentry.file.modify_time,
			&ep->dentry.file.modify_date,
			&ep->dentry.file.modify_time_cs);
	exfat_set_entry_time(sbi, &inode->i_atime,
			&ep->dentry.file.access_tz,
			&ep->dentry.file.access_time,
			&ep->dentry.file.access_date,
			NULL);

	/* File size should be zero if there is no cluster allocated */
	on_disk_size = i_size_read(inode);

	if (ei->start_clu == EXFAT_EOF_CLUSTER)
		on_disk_size = 0;

	ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
	ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
	if (on_disk_size) {
		ep2->dentry.stream.flags = ei->flags;
		ep2->dentry.stream.start_clu = cpu_to_le32(ei->start_clu);
	} else {
		/* Empty file: record a free start cluster with FAT chaining. */
		ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
		ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
	}

	/* Recompute the entry-set checksum before it goes back to disk. */
	exfat_update_dir_chksum_with_entry_set(&es);
	return exfat_put_dentry_set(&es, sync);
}
90
91int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
92{
93 int ret;
94
95 mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
96 ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
97 mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
98
99 return ret;
100}
101
/*
 * Synchronously write the inode's dentry set.  The caller must already
 * hold sbi->s_lock (checked via lockdep).
 */
void exfat_sync_inode(struct inode *inode)
{
	lockdep_assert_held(&EXFAT_SB(inode->i_sb)->s_lock);
	__exfat_write_inode(inode, 1);
}
107
/*
 * Input: inode, (logical) clu_offset, target allocation area
 * Output: errcode, cluster number
 * *clu = (~0), if it's unable to allocate a new cluster
 */
static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
		unsigned int *clu, int create)
{
	int ret;
	unsigned int last_clu;
	struct exfat_chain new_clu;
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	unsigned int local_clu_offset = clu_offset;
	unsigned int num_to_be_allocated = 0, num_clusters = 0;

	/* Clusters currently backing the on-disk size. */
	if (ei->i_size_ondisk > 0)
		num_clusters =
			EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);

	if (clu_offset >= num_clusters)
		num_to_be_allocated = clu_offset - num_clusters + 1;

	/* Lookup past the allocated area without create: report EOF. */
	if (!create && (num_to_be_allocated > 0)) {
		*clu = EXFAT_EOF_CLUSTER;
		return 0;
	}

	*clu = last_clu = ei->start_clu;

	if (ei->flags == ALLOC_NO_FAT_CHAIN) {
		/* Contiguous file: target cluster is addressable directly. */
		if (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
			last_clu += clu_offset - 1;

			if (clu_offset == num_clusters)
				*clu = EXFAT_EOF_CLUSTER;
			else
				*clu += clu_offset;
		}
	} else if (ei->type == TYPE_FILE) {
		/* FAT-chained regular file: walk via the extent helper. */
		unsigned int fclus = 0;
		int err = exfat_get_cluster(inode, clu_offset,
				&fclus, clu, &last_clu, 1);
		if (err)
			return -EIO;

		clu_offset -= fclus;
	} else {
		/* hint information */
		if (clu_offset > 0 && ei->hint_bmap.off != EXFAT_EOF_CLUSTER &&
		    ei->hint_bmap.off > 0 && clu_offset >= ei->hint_bmap.off) {
			clu_offset -= ei->hint_bmap.off;
			/* hint_bmap.clu should be valid */
			WARN_ON(ei->hint_bmap.clu < 2);
			*clu = ei->hint_bmap.clu;
		}

		/* Follow the FAT chain for the remaining offset. */
		while (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
			last_clu = *clu;
			if (exfat_get_next_cluster(sb, clu))
				return -EIO;
			clu_offset--;
		}
	}

	if (*clu == EXFAT_EOF_CLUSTER) {
		exfat_set_volume_dirty(sb);

		/* Prefer the cluster right after the current tail. */
		new_clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
			EXFAT_EOF_CLUSTER : last_clu + 1;
		new_clu.size = 0;
		new_clu.flags = ei->flags;

		/* allocate a cluster */
		if (num_to_be_allocated < 1) {
			/* Broken FAT (i_size > allocated FAT) */
			exfat_fs_error(sb, "broken FAT chain.");
			return -EIO;
		}

		ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu,
				inode_needs_sync(inode));
		if (ret)
			return ret;

		if (new_clu.dir == EXFAT_EOF_CLUSTER ||
		    new_clu.dir == EXFAT_FREE_CLUSTER) {
			exfat_fs_error(sb,
				"bogus cluster new allocated (last_clu : %u, new_clu : %u)",
				last_clu, new_clu.dir);
			return -EIO;
		}

		/* append to the FAT chain */
		if (last_clu == EXFAT_EOF_CLUSTER) {
			if (new_clu.flags == ALLOC_FAT_CHAIN)
				ei->flags = ALLOC_FAT_CHAIN;
			ei->start_clu = new_clu.dir;
		} else {
			if (new_clu.flags != ei->flags) {
				/* no-fat-chain bit is disabled,
				 * so fat-chain should be synced with
				 * alloc-bitmap
				 */
				exfat_chain_cont_cluster(sb, ei->start_clu,
						num_clusters);
				ei->flags = ALLOC_FAT_CHAIN;
			}
			if (new_clu.flags == ALLOC_FAT_CHAIN)
				if (exfat_ent_set(sb, last_clu, new_clu.dir))
					return -EIO;
		}

		num_clusters += num_to_be_allocated;
		*clu = new_clu.dir;

		/* i_blocks is accounted in 512-byte units. */
		inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9;

		/*
		 * Move *clu pointer along FAT chains (hole care) because the
		 * caller of this function expect *clu to be the last cluster.
		 * This only works when num_to_be_allocated >= 2,
		 * *clu = (the first cluster of the allocated chain) =>
		 * (the last cluster of ...)
		 */
		if (ei->flags == ALLOC_NO_FAT_CHAIN) {
			*clu += num_to_be_allocated - 1;
		} else {
			while (num_to_be_allocated > 1) {
				if (exfat_get_next_cluster(sb, clu))
					return -EIO;
				num_to_be_allocated--;
			}
		}

	}

	/* hint information */
	ei->hint_bmap.off = local_clu_offset;
	ei->hint_bmap.clu = *clu;

	return 0;
}
252
253static int exfat_map_new_buffer(struct exfat_inode_info *ei,
254 struct buffer_head *bh, loff_t pos)
255{
256 if (buffer_delay(bh) && pos > ei->i_size_aligned)
257 return -EIO;
258 set_buffer_new(bh);
259
260 /*
261 * Adjust i_size_aligned if i_size_ondisk is bigger than it.
262 */
263 if (ei->i_size_ondisk > ei->i_size_aligned)
264 ei->i_size_aligned = ei->i_size_ondisk;
265 return 0;
266}
267
/*
 * Map logical block @iblock of @inode to a device sector in @bh_result.
 * With @create set, missing clusters are allocated first.  Serialized
 * against other mappers/truncate by sbi->s_lock.
 */
static int exfat_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	struct exfat_inode_info *ei = EXFAT_I(inode);
	struct super_block *sb = inode->i_sb;
	struct exfat_sb_info *sbi = EXFAT_SB(sb);
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	int err = 0;
	unsigned long mapped_blocks = 0;
	unsigned int cluster, sec_offset;
	sector_t last_block;
	sector_t phys = 0;
	loff_t pos;

	mutex_lock(&sbi->s_lock);
	last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
	/* Read beyond EOF: leave the buffer unmapped. */
	if (iblock >= last_block && !create)
		goto done;

	/* Is this block already allocated? */
	err = exfat_map_cluster(inode, iblock >> sbi->sect_per_clus_bits,
			&cluster, create);
	if (err) {
		if (err != -ENOSPC)
			exfat_fs_error_ratelimit(sb,
				"failed to bmap (inode : %p iblock : %llu, err : %d)",
				inode, (unsigned long long)iblock, err);
		goto unlock_ret;
	}

	if (cluster == EXFAT_EOF_CLUSTER)
		goto done;

	/* sector offset in cluster */
	sec_offset = iblock & (sbi->sect_per_clus - 1);

	phys = exfat_cluster_to_sector(sbi, cluster) + sec_offset;
	mapped_blocks = sbi->sect_per_clus - sec_offset;
	/* Do not let the mapping cross the end of this cluster. */
	max_blocks = min(mapped_blocks, max_blocks);

	/* Treat newly added block / cluster */
	if (iblock < last_block)
		create = 0;

	if (create || buffer_delay(bh_result)) {
		/* Extend the tracked on-disk size to cover this block. */
		pos = EXFAT_BLK_TO_B((iblock + 1), sb);
		if (ei->i_size_ondisk < pos)
			ei->i_size_ondisk = pos;
	}

	if (create) {
		err = exfat_map_new_buffer(ei, bh_result, pos);
		if (err) {
			exfat_fs_error(sb,
				"requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
				pos, ei->i_size_aligned);
			goto unlock_ret;
		}
	}

	if (buffer_delay(bh_result))
		clear_buffer_delay(bh_result);
	map_bh(bh_result, sb, phys);
done:
	bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
unlock_ret:
	mutex_unlock(&sbi->s_lock);
	return err;
}
337
/* Single-page read via the generic mpage helpers (folio API on 5.19+). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
static int exfat_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, exfat_get_block);
}
#else
static int exfat_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, exfat_get_block);
}
#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900349
/* Multi-page readahead via the generic mpage helpers (rac API on 5.8+). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
static void exfat_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, exfat_get_block);
}
#else
static int exfat_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned int nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
}
#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900362
/* ->writepage() is only wired up for pre-6.2 kernels (see version gate). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
static int exfat_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, exfat_get_block, wbc);
}
#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900369
/* Write back dirty pages through the generic mpage writeback helper. */
static int exfat_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, exfat_get_block);
}
375
376static void exfat_write_failed(struct address_space *mapping, loff_t to)
377{
378 struct inode *inode = mapping->host;
379
380 if (to > i_size_read(inode)) {
381 truncate_pagecache(inode, i_size_read(inode));
Yuezhang Mo63e1c4f2022-07-22 11:43:04 +0900382#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
383 inode->i_mtime = inode->i_ctime = current_time(inode);
384#else
385 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
386#endif
Yuezhang Mobf6c6682022-12-09 22:37:17 +0900387 exfat_truncate(inode);
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900388 }
389}
390
/*
 * ->write_begin(): prepare a page for a buffered write using
 * cont_write_begin(), tracking i_size_ondisk as the continuation point.
 * The 5.19+ prototype dropped the "flags" argument.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
static int exfat_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned int len,
		struct page **pagep, void **fsdata)
#else
static int exfat_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned int len, unsigned int flags,
		struct page **pagep, void **fsdata)
#endif
{
	int ret;

	*pagep = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
			exfat_get_block,
			&EXFAT_I(mapping->host)->i_size_ondisk);
#else
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
			exfat_get_block,
			&EXFAT_I(mapping->host)->i_size_ondisk);
#endif
	/* Roll back any speculative size extension on failure. */
	if (ret < 0)
		exfat_write_failed(mapping, pos+len);

	return ret;
}
418
419static int exfat_write_end(struct file *file, struct address_space *mapping,
420 loff_t pos, unsigned int len, unsigned int copied,
421 struct page *pagep, void *fsdata)
422{
423 struct inode *inode = mapping->host;
424 struct exfat_inode_info *ei = EXFAT_I(inode);
425 int err;
426
427 err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
428
Christophe Vu-Brugier032c9d32021-11-03 10:41:08 +0900429 if (ei->i_size_aligned < i_size_read(inode)) {
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900430 exfat_fs_error(inode->i_sb,
431 "invalid size(size(%llu) > aligned(%llu)\n",
Christophe Vu-Brugier032c9d32021-11-03 10:41:08 +0900432 i_size_read(inode), ei->i_size_aligned);
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900433 return -EIO;
434 }
435
436 if (err < len)
437 exfat_write_failed(mapping, pos+len);
438
439 if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
Namjae Jeon4e3295f2020-02-08 19:13:34 +0900440#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900441 inode->i_mtime = inode->i_ctime = current_time(inode);
Namjae Jeonf53d47c2020-02-04 09:18:10 +0900442#else
443 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
444#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900445 ei->attr |= ATTR_ARCHIVE;
446 mark_inode_dirty(inode);
447 }
448
449 return err;
450}
451

/* ->direct_IO(): raw block-device I/O that bypasses the page cache. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
#else
static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
#endif
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t size = iocb->ki_pos + iov_iter_count(iter);
	int rw = iov_iter_rw(iter);
	ssize_t ret;

	if (rw == WRITE) {
		/*
		 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
		 * so we need to update the ->i_size_aligned to block boundary.
		 *
		 * But we must fill the remaining area or hole by nul for
		 * updating ->i_size_aligned
		 *
		 * Return 0, and fallback to normal buffered write.
		 */
		if (EXFAT_I(inode)->i_size_aligned < size)
			return 0;
	}

	/*
	 * Need to use the DIO_LOCKING for avoiding the race
	 * condition of exfat_get_block() and ->truncate().
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
	ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
#else
	ret = blockdev_direct_IO(iocb, inode, iter, offset, exfat_get_block);
#endif
	/* A failed direct write may have extended the file: roll it back. */
	if (ret < 0 && (rw & WRITE))
		exfat_write_failed(mapping, size);
	return ret;
}
493
494static sector_t exfat_aop_bmap(struct address_space *mapping, sector_t block)
495{
496 sector_t blocknr;
497
498 /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */
499 down_read(&EXFAT_I(mapping->host)->truncate_lock);
500 blocknr = generic_block_bmap(mapping, block, exfat_get_block);
501 up_read(&EXFAT_I(mapping->host)->truncate_lock);
502 return blocknr;
503}
504
/*
 * exfat_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate to physically zeroout the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 * Also, avoid causing failure from fsx for cases of "data past EOF"
 */
int exfat_block_truncate_page(struct inode *inode, loff_t from)
{
	return block_truncate_page(inode->i_mapping, from, exfat_get_block);
}
516
/*
 * Address-space operations table.  Entries are gated on the kernel
 * version the module is built against, tracking upstream aops API
 * changes (folio conversion, writepage/migrate removal, etc.).
 */
static const struct address_space_operations exfat_aops = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
	.dirty_folio = block_dirty_folio,
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
	.set_page_dirty = __set_page_dirty_buffers,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
	.invalidate_folio = block_invalidate_folio,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
	.read_folio = exfat_read_folio,
#else
	.readpage = exfat_readpage,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
	.readahead = exfat_readahead,
#else
	.readpages = exfat_readpages,
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
	.writepage = exfat_writepage,
#endif
	.writepages = exfat_writepages,
	.write_begin = exfat_write_begin,
	.write_end = exfat_write_end,
	.direct_IO = exfat_direct_IO,
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
	.bmap = exfat_aop_bmap
#else
	.bmap = exfat_aop_bmap,
	.migrate_folio = buffer_migrate_folio,
#endif
};
550
/* Hash an on-disk dentry position into an inode hash table bucket index. */
static inline unsigned long exfat_hash(loff_t i_pos)
{
	return hash_32(i_pos, EXFAT_HASH_BITS);
}
555
556void exfat_hash_inode(struct inode *inode, loff_t i_pos)
557{
558 struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
559 struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
560
561 spin_lock(&sbi->inode_hash_lock);
562 EXFAT_I(inode)->i_pos = i_pos;
563 hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
564 spin_unlock(&sbi->inode_hash_lock);
565}
566
567void exfat_unhash_inode(struct inode *inode)
568{
569 struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
570
571 spin_lock(&sbi->inode_hash_lock);
572 hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
573 EXFAT_I(inode)->i_pos = 0;
574 spin_unlock(&sbi->inode_hash_lock);
575}
576
577struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
578{
579 struct exfat_sb_info *sbi = EXFAT_SB(sb);
580 struct exfat_inode_info *info;
581 struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
582 struct inode *inode = NULL;
583
584 spin_lock(&sbi->inode_hash_lock);
585 hlist_for_each_entry(info, head, i_hash_fat) {
586 WARN_ON(info->vfs_inode.i_sb != sb);
587
588 if (i_pos != info->i_pos)
589 continue;
590 inode = igrab(&info->vfs_inode);
591 if (inode)
592 break;
593 }
594 spin_unlock(&sbi->inode_hash_lock);
595 return inode;
596}
597
/* doesn't deal with root inode */
static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
{
	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
	struct exfat_inode_info *ei = EXFAT_I(inode);
	loff_t size = info->size;

	/* Copy the parsed on-disk dentry description into the in-core inode. */
	ei->dir = info->dir;
	ei->entry = info->entry;
	ei->attr = info->attr;
	ei->start_clu = info->start_clu;
	ei->flags = info->flags;
	ei->type = info->type;

	/* Reset lookup/allocation hints for the freshly built inode. */
	ei->version = 0;
	ei->hint_stat.eidx = 0;
	ei->hint_stat.clu = info->start_clu;
	ei->hint_femp.eidx = EXFAT_HINT_NONE;
	ei->hint_bmap.off = EXFAT_EOF_CLUSTER;
	ei->i_pos = 0;

	inode->i_uid = sbi->options.fs_uid;
	inode->i_gid = sbi->options.fs_gid;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
	inode_inc_iversion(inode);
#else
	inode->i_version++;
#endif
	/* prandom_u32() was retired in favor of get_random_u32() on 6.1+. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
	inode->i_generation = get_random_u32();
#else
	inode->i_generation = prandom_u32();
#endif

	if (info->attr & ATTR_SUBDIR) { /* directory */
		inode->i_generation &= ~1;
		inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
		inode->i_op = &exfat_dir_inode_operations;
		inode->i_fop = &exfat_dir_operations;
		set_nlink(inode, info->num_subdirs);
	} else { /* regular file */
		inode->i_generation |= 1;
		inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
		inode->i_op = &exfat_file_inode_operations;
		inode->i_fop = &exfat_file_operations;
		inode->i_mapping->a_ops = &exfat_aops;
		inode->i_mapping->nrpages = 0;
	}

	i_size_write(inode, size);

	/* ondisk and aligned size should be aligned with block size */
	if (size & (inode->i_sb->s_blocksize - 1)) {
		size |= (inode->i_sb->s_blocksize - 1);
		size++;
	}

	ei->i_size_aligned = size;
	ei->i_size_ondisk = size;

	exfat_save_attr(inode, info->attr);

	/* i_blocks is in 512-byte units, rounded up to whole clusters. */
	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
	inode->i_mtime = info->mtime;
	inode->i_ctime = info->mtime;
	ei->i_crtime = info->crtime;
	inode->i_atime = info->atime;

	return 0;
}
668
669struct inode *exfat_build_inode(struct super_block *sb,
670 struct exfat_dir_entry *info, loff_t i_pos)
671{
672 struct inode *inode;
673 int err;
674
675 inode = exfat_iget(sb, i_pos);
676 if (inode)
677 goto out;
678 inode = new_inode(sb);
679 if (!inode) {
680 inode = ERR_PTR(-ENOMEM);
681 goto out;
682 }
683 inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
Namjae Jeon4e3295f2020-02-08 19:13:34 +0900684#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900685 inode_set_iversion(inode, 1);
Namjae Jeonf53d47c2020-02-04 09:18:10 +0900686#else
687 inode->i_version = 1;
688#endif
Namjae Jeon9273f3d2020-02-03 21:47:19 +0900689 err = exfat_fill_inode(inode, info);
690 if (err) {
691 iput(inode);
692 inode = ERR_PTR(err);
693 goto out;
694 }
695 exfat_hash_inode(inode, i_pos);
696 insert_inode_hash(inode);
697out:
698 return inode;
699}
700
/*
 * ->evict_inode(): final teardown when an inode leaves the cache.  An
 * unlinked inode (i_nlink == 0) is truncated to zero here, releasing
 * its clusters via __exfat_truncate() under sbi->s_lock.
 */
void exfat_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);

	if (!inode->i_nlink) {
		i_size_write(inode, 0);
		mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
		__exfat_truncate(inode);
		mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
	}

	/* Drop buffers, caches, and the hash entry after the VFS teardown. */
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	exfat_cache_inval_inode(inode);
	exfat_unhash_inode(inode);
}