// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
int afs_set_page_dirty(struct page *page)
{
	return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

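	/* Check the file size lock-free first; only take cb_lock and recheck
	 * if this write really appears to extend the file.
	 */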
	i_size = i_size_read(&vnode->vfs_inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

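	/* Walk the folios covered by the range, dropping each one's
	 * dirty-region record and ending writeback on it.
	 */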
	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

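	/* If the server rejected the key we used, try each of the other
	 * cached writeback keys in turn before giving up.
	 */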
	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

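			/* Add this folio's dirty bytes [0, t) to the region,
			 * plus any undirty tail left by the previous folio
			 * (only possible when the file holds new content and
			 * gaps may be zero-filled).  Only keep going if this
			 * folio is dirty right up to its end or the file is
			 * being filled locally.
			 */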
			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
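		 * (The cap below is 65536 * 4096 bytes, i.e. 256MiB.)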
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(afs_vnode_cache(vnode),
					mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			}
			folio_put(folio);
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;
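
	/* In cyclic mode, resume from where the last pass left off and then
	 * wrap round to the start of the file once the tail has been written.
	 */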
801
David Howells31143d52007-05-09 02:33:46 -0700802 if (wbc->range_cyclic) {
David Howellse87b03f2020-10-20 09:33:45 +0100803 start = mapping->writeback_index * PAGE_SIZE;
804 ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
Tom Rixafe69492021-04-30 08:50:31 -0700805 if (ret == 0) {
806 mapping->writeback_index = next / PAGE_SIZE;
807 if (start > 0 && wbc->nr_to_write > 0) {
808 ret = afs_writepages_region(mapping, wbc, 0,
809 start, &next);
810 if (ret == 0)
811 mapping->writeback_index =
812 next / PAGE_SIZE;
813 }
814 }
David Howells31143d52007-05-09 02:33:46 -0700815 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
David Howellse87b03f2020-10-20 09:33:45 +0100816 ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
Tom Rixafe69492021-04-30 08:50:31 -0700817 if (wbc->nr_to_write > 0 && ret == 0)
David Howells5a972472021-07-12 17:04:47 +0100818 mapping->writeback_index = next / PAGE_SIZE;
David Howells31143d52007-05-09 02:33:46 -0700819 } else {
David Howellse87b03f2020-10-20 09:33:45 +0100820 ret = afs_writepages_region(mapping, wbc,
821 wbc->range_start, wbc->range_end, &next);
David Howells31143d52007-05-09 02:33:46 -0700822 }
823
David Howellsec0fa0b2020-10-07 14:22:12 +0100824 up_read(&vnode->validate_lock);
David Howells31143d52007-05-09 02:33:46 -0700825 _leave(" = %d", ret);
826 return ret;
827}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio_index(folio));

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

David Howells78525c72021-08-11 09:49:13 +0100996 bv[0].bv_page = &folio->page;
David Howellsbd80d8a2020-02-06 14:22:28 +0000997 bv[0].bv_offset = f;
998 bv[0].bv_len = t - f;
999 iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
1000
David Howells78525c72021-08-11 09:49:13 +01001001 trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
1002 ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
David Howells4343d002017-11-02 15:27:52 +00001003 }
1004
David Howells78525c72021-08-11 09:49:13 +01001005 trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
1006 folio_detach_private(folio);
1007 folio_wait_fscache(folio);
David Howells4343d002017-11-02 15:27:52 +00001008 return ret;
David Howells31143d52007-05-09 02:33:46 -07001009}
David Howellsc7f75ef2020-02-06 14:22:30 +00001010
1011/*
1012 * Deal with the completion of writing the data to the cache.
1013 */
1014static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
1015 bool was_async)
1016{
1017 struct afs_vnode *vnode = priv;
1018
1019 if (IS_ERR_VALUE(transferred_or_error) &&
1020 transferred_or_error != -ENOBUFS)
1021 afs_invalidate_cache(vnode, 0);
1022}
1023
1024/*
1025 * Save the write to the cache also.
1026 */
1027static void afs_write_to_cache(struct afs_vnode *vnode,
1028 loff_t start, size_t len, loff_t i_size,
1029 bool caching)
1030{
1031 fscache_write_to_cache(afs_vnode_cache(vnode),
1032 vnode->vfs_inode.i_mapping, start, len, i_size,
1033 afs_write_to_cache_done, vnode, caching);
1034}