// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

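	/* A write that starts at or beyond the current EOF can't overlap any
	 * data held by the server, so there's nothing to fetch; just clear
	 * the part of the page being prepared for the write.
	 */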
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

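	/* Otherwise fetch the covering data from the server.  The request
	 * carries a single-entry page array (struct_size() sizes the
	 * flexible array member) and takes its own reference on the page.
	 */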
	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
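	/* The dirty region is encoded as a [from, to) byte range: "from" in
	 * the low AFS_PRIV_SHIFT bits (masked by AFS_PRIV_MAX) and "to" in
	 * the bits above, so dirtying bytes 100-299 of a clean page yields
	 * priv = (300 << AFS_PRIV_SHIFT) | 100.
	 */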

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

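	/* If this write extends the file, update i_size.  The lockless check
	 * is repeated under wb_lock in case another writer extended the file
	 * in the meantime.
	 */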
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

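	/* Work through the range in batches of up to PAGEVEC_SIZE pages,
	 * marking each page failed and evicting it from the page cache; the
	 * same batching pattern is used by afs_redirty_pages() and
	 * afs_pages_written_back() below.
	 */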
	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
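		/* Account the span from the first byte stored in the first
		 * page to the last byte stored in the last page.
		 */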
		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
				(op->store.first * PAGE_SIZE + op->store.first_offset),
				&afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	int ret;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.mapping = mapping;
	op->store.first = first;
	op->store.last = last;
	op->store.first_offset = offset;
	op->store.last_to = to;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

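	/* If the operation failed for lack of permission, cycle through the
	 * other cached writeback keys for this vnode and reissue the store
	 * with each in turn until one works or we run out.
	 */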
	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

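	/* Candidate pages are pulled in runs of up to ARRAY_SIZE(pages), and
	 * the batch as a whole is capped at 65536 pages so a single store
	 * can't grow without bound.
	 */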
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
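	/* On failure the error has already been recorded against the mapping
	 * via mapping_set_error(), so ->writepage itself reports success.
	 */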
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

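	/* Pull one dirty page at a time: afs_write_back_from_locked_page()
	 * will itself batch up any contiguously dirty pages that follow it.
	 */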
	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
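		/* Resume from where the last cyclic pass stopped, then wrap
		 * around to sweep [0, start) if there's still quota left.
		 */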
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

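	/* generic_file_write_iter() does the heavy lifting, calling back
	 * into afs_write_begin() and afs_write_end() above for each page it
	 * copies data into.
	 */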
	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The caller must not hold
 * vnode->wb_lock as it is taken below.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, t, f);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}