/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

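/*
 * For orientation: these entry points are assumed to be wired up through
 * the AFS address_space_operations table (defined in fs/afs/file.c in
 * kernels of this vintage), roughly:
 *
 *      const struct address_space_operations afs_fs_aops = {
 *              .set_page_dirty = afs_set_page_dirty,
 *              .write_begin    = afs_write_begin,
 *              .write_end      = afs_write_end,
 *              .writepage      = afs_writepage,
 *              .writepages     = afs_writepages,
 *      };
 *
 * This is a sketch for context, not a copy of the real table.
 */
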
/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
        struct afs_writeback *front;
        struct afs_vnode *vnode = wb->vnode;

        list_del_init(&wb->link);
        if (!list_empty(&vnode->writebacks)) {
                /* if an fsync rises to the front of the queue then wake it
                 * up */
                front = list_entry(vnode->writebacks.next,
                                   struct afs_writeback, link);
                if (front->state == AFS_WBACK_SYNCING) {
                        _debug("wake up sync");
                        front->state = AFS_WBACK_COMPLETE;
                        wake_up(&front->waitq);
                }
        }
}

/*
 * free a writeback record
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
        _enter("");
        key_put(wb->key);
        kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 */
void afs_put_writeback(struct afs_writeback *wb)
{
        struct afs_vnode *vnode = wb->vnode;

        _enter("{%d}", wb->usage);

        spin_lock(&vnode->writeback_lock);
        if (--wb->usage == 0)
                afs_unlink_writeback(wb);
        else
                wb = NULL;
        spin_unlock(&vnode->writeback_lock);
        if (wb)
                afs_free_writeback(wb);
}

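/*
 * Lifecycle note: a writeback record starts at usage == 1 when created in
 * afs_write_begin() and, roughly speaking, gains a reference for each
 * further page whose page_private() points at it; the last put frees it.
 * Callers must not hold writeback_lock when calling afs_put_writeback() -
 * it takes the lock itself, and the kfree() in afs_free_writeback() only
 * runs after the lock has been dropped.
 */
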
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

        req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
                      GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        atomic_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages[0] = page;
        get_page(page);

        ret = afs_vnode_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}

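/*
 * A hypothetical two-page variant of the request built above, to show the
 * shape of the afs_read pattern (allocation sized for the page array, one
 * page ref taken per slot, released again by afs_put_read()):
 *
 *      req = kzalloc(sizeof(struct afs_read) + 2 * sizeof(struct page *),
 *                    GFP_KERNEL);
 *      ...
 *      req->nr_pages = 2;
 *      req->pages[0] = page0;
 *      req->pages[1] = page1;
 *      get_page(page0);
 *      get_page(page1);
 *      ret = afs_vnode_fetch_data(vnode, key, req);
 *      afs_put_read(req);
 *
 * page0/page1 are stand-in names, not fields used elsewhere in this file.
 */
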
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_writeback *candidate, *wb;
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = file->private_data;
        unsigned from = pos & (PAGE_SIZE - 1);
        unsigned to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
        if (!candidate)
                return -ENOMEM;
        candidate->vnode = vnode;
        candidate->first = candidate->last = index;
        candidate->offset_first = from;
        candidate->to_last = to;
        INIT_LIST_HEAD(&candidate->link);
        candidate->usage = 1;
        candidate->state = AFS_WBACK_PENDING;
        init_waitqueue_head(&candidate->waitq);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                kfree(candidate);
                return -ENOMEM;
        }

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

        /* page won't leak in error case: it eventually gets cleaned off LRU */
        *pagep = page;

try_again:
        spin_lock(&vnode->writeback_lock);

        /* see if this page is already pending a writeback under a suitable key
         * - if so we can just join onto that one */
        wb = (struct afs_writeback *) page_private(page);
        if (wb) {
                if (wb->key == key && wb->state == AFS_WBACK_PENDING)
                        goto subsume_in_current_wb;
                goto flush_conflicting_wb;
        }

        if (index > 0) {
                /* see if we can find an already pending writeback that we can
                 * append this page to */
                list_for_each_entry(wb, &vnode->writebacks, link) {
                        if (wb->last == index - 1 && wb->key == key &&
                            wb->state == AFS_WBACK_PENDING)
                                goto append_to_previous_wb;
                }
        }

        list_add_tail(&candidate->link, &vnode->writebacks);
        candidate->key = key_get(key);
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) candidate);
        _leave(" = 0 [new]");
        return 0;

subsume_in_current_wb:
        _debug("subsume");
        ASSERTRANGE(wb->first, <=, index, <=, wb->last);
        if (index == wb->first && from < wb->offset_first)
                wb->offset_first = from;
        if (index == wb->last && to > wb->to_last)
                wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        kfree(candidate);
        _leave(" = 0 [sub]");
        return 0;

append_to_previous_wb:
        _debug("append into %lx-%lx", wb->first, wb->last);
        wb->usage++;
        wb->last++;
        wb->to_last = to;
        spin_unlock(&vnode->writeback_lock);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long) wb);
        kfree(candidate);
        _leave(" = 0 [app]");
        return 0;

        /* the page is currently bound to another context, so if it's dirty we
         * need to flush it before we can use the new context */
flush_conflicting_wb:
        _debug("flush conflict");
        if (wb->state == AFS_WBACK_PENDING)
                wb->state = AFS_WBACK_CONFLICTING;
        spin_unlock(&vnode->writeback_lock);
        if (clear_page_dirty_for_io(page)) {
                ret = afs_write_back_from_locked_page(wb, page);
                if (ret < 0) {
                        afs_put_writeback(candidate);
                        _leave(" = %d", ret);
                        return ret;
                }
        }

        /* the page holds a ref on the writeback record */
        afs_put_writeback(wb);
        set_page_private(page, 0);
        ClearPagePrivate(page);
        goto try_again;
}

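/*
 * To summarise the paths above, a page being prepared for write takes one
 * of four routes:
 *
 *      page_private() set, same key, wb PENDING  -> subsume into that record
 *      page_private() set, anything else         -> flush conflict, retry
 *      a PENDING record ends at index - 1        -> append to that record
 *      otherwise                                 -> start a new record
 */
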
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = file->private_data;
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->writeback_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->writeback_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
                                return ret;
                }
                SetPageUptodate(page);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        unlock_page(page);
        put_page(page);

        return copied;
}

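/*
 * The i_size update above is the usual check/lock/recheck pattern: the
 * unlocked read keeps the common no-extension case cheap, and the second
 * read under writeback_lock stops two racing extenders from moving i_size
 * backwards.  In sketch form:
 *
 *      if (new_size > i_size_read(inode)) {
 *              spin_lock(&lock);
 *              if (new_size > i_size_read(inode))
 *                      i_size_write(inode, new_size);
 *              spin_unlock(&lock);
 *      }
 */
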
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
                           pgoff_t first, pgoff_t last)
{
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv, 0);

        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        if (error)
                                SetPageError(page);
                        if (PageWriteback(page))
                                end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first < last);

        _leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
                                           struct page *primary_page)
{
        struct page *pages[8], *page;
        unsigned long count;
        unsigned n, offset, to;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* find all consecutive lockable dirty pages, stopping when we find a
         * page that is not immediately lockable, is not dirty or is missing,
         * or we reach the end of the range */
        start = primary_page->index;
        if (start >= wb->last)
                goto no_more;
        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = wb->last - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
                                          start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        page = pages[loop];
                        if (page->index > wb->last)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) ||
                            page_private(page) != (unsigned long) wb) {
                                unlock_page(page);
                                break;
                        }
                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= wb->last && count < 65536);

no_more:
        /* we now have a contiguous set of dirty pages, each with writeback set
         * and the dirty mark cleared; the first page is locked and must remain
         * so, all the rest are unlocked */
        first = primary_page->index;
        last = first + count - 1;

        offset = (first == wb->first) ? wb->offset_first : 0;
        to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_vnode_store_data(wb, first, last, offset, to);
        if (ret < 0) {
                switch (ret) {
                case -EDQUOT:
                case -ENOSPC:
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
                        break;
                case -EROFS:
                case -EIO:
                case -EREMOTEIO:
                case -EFBIG:
                case -ENOENT:
                case -ENOMEDIUM:
                case -ENXIO:
                        afs_kill_pages(wb->vnode, true, first, last);
                        mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
                        break;
                case -EACCES:
                case -EPERM:
                case -ENOKEY:
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EKEYREVOKED:
                        afs_kill_pages(wb->vnode, false, first, last);
                        break;
                default:
                        break;
                }
        } else {
                ret = count;
        }

        _leave(" = %d", ret);
        return ret;
}

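/*
 * Notes on the function above: candidate pages are gathered in windows of
 * ARRAY_SIZE(pages) (8) with find_get_pages_contig(), the run is capped at
 * 65536 pages so one record cannot monopolise the flusher thread, and only
 * the primary page stays locked - every other page is unlocked again once
 * PG_writeback is set on it.  On failure, hard errors (-EIO, -ENOENT, ...)
 * kill the pages and mark the mapping in error, whereas permission and key
 * errors kill the pages without flagging the mapping, on the assumption
 * that a retry under a different key might still succeed.
 */
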
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct afs_writeback *wb;
        int ret;

        _enter("{%lx},", page->index);

        wb = (struct afs_writeback *) page_private(page);
        ASSERT(wb != NULL);

        ret = afs_write_back_from_locked_page(wb, page);
        unlock_page(page);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct afs_writeback *wb;
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
                n = find_get_pages_range_tag(mapping, &index, end,
                                             PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                /* at this point we hold neither mapping->tree_lock nor lock on
                 * the page itself: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled back from
                 * swapper_space to tmpfs file mapping
                 */
                lock_page(page);

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                wb = (struct afs_writeback *) page_private(page);
                ASSERT(wb != NULL);

                spin_lock(&wb->vnode->writeback_lock);
                wb->state = AFS_WBACK_WRITING;
                spin_unlock(&wb->vnode->writeback_lock);

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(wb, page);
                unlock_page(page);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}

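/*
 * A worked example of the range_cyclic case above: if the last scan left
 * mapping->writeback_index at 5, writeback proceeds as
 *
 *      afs_writepages_region(mapping, wbc, 5, -1, &next);
 *      afs_writepages_region(mapping, wbc, 0, 5, &next);
 *
 * i.e. from the resume point to the end of the file, then wrapping round
 * to cover the pages before the resume point, with writeback_index left
 * wherever the scan stopped.
 */
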
/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
        struct afs_writeback *wb = call->wb;
        struct pagevec pv;
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;
        bool free_wb;

        _enter("{%x:%u},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        ASSERT(wb != NULL);

        pagevec_init(&pv, 0);

        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(call->mapping, first, count,
                                              pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                spin_lock(&vnode->writeback_lock);
                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        end_page_writeback(page);
                        if (page_private(page) == (unsigned long) wb) {
                                set_page_private(page, 0);
                                ClearPagePrivate(page);
                                wb->usage--;
                        }
                }
                free_wb = false;
                if (wb->usage == 0) {
                        afs_unlink_writeback(wb);
                        free_wb = true;
                }
                spin_unlock(&vnode->writeback_lock);
                first += count;
                if (free_wb) {
                        afs_free_writeback(wb);
                        wb = NULL;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%x.%u},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
        struct address_space *mapping = vnode->vfs_inode.i_mapping;
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .range_cyclic   = 1,
        };
        int ret;

        _enter("");

        ret = mapping->a_ops->writepages(mapping, &wbc);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        _leave(" = %d", ret);
        return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_writeback *wb, *xwb;
        struct afs_vnode *vnode = AFS_FS_I(inode);
        int ret;

        _enter("{%x:%u},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = file_write_and_wait_range(file, start, end);
        if (ret)
                return ret;
        inode_lock(inode);

        /* use a writeback record as a marker in the queue - when this reaches
         * the front of the queue, all the outstanding writes are either
         * completed or rejected */
        wb = kzalloc(sizeof(*wb), GFP_KERNEL);
        if (!wb) {
                ret = -ENOMEM;
                goto out;
        }
        wb->vnode = vnode;
        wb->first = 0;
        wb->last = -1;
        wb->offset_first = 0;
        wb->to_last = PAGE_SIZE;
        wb->usage = 1;
        wb->state = AFS_WBACK_SYNCING;
        init_waitqueue_head(&wb->waitq);

        spin_lock(&vnode->writeback_lock);
        list_for_each_entry(xwb, &vnode->writebacks, link) {
                if (xwb->state == AFS_WBACK_PENDING)
                        xwb->state = AFS_WBACK_CONFLICTING;
        }
        list_add_tail(&wb->link, &vnode->writebacks);
        spin_unlock(&vnode->writeback_lock);

        /* push all the outstanding writebacks to the server */
        ret = afs_writeback_all(vnode);
        if (ret < 0) {
                afs_put_writeback(wb);
                _leave(" = %d [wb]", ret);
                goto out;
        }

        /* wait for the preceding writes to actually complete */
        ret = wait_event_interruptible(wb->waitq,
                                       wb->state == AFS_WBACK_COMPLETE ||
                                       vnode->writebacks.next == &wb->link);
        afs_put_writeback(wb);
        _leave(" = %d", ret);
out:
        inode_unlock(inode);
        return ret;
}

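/*
 * A worked example of the marker trick above: with records [A, B] already
 * queued, afs_fsync() appends a SYNCING marker M to give [A, B, M].  As A
 * and B drop to zero usage, afs_unlink_writeback() removes them, and the
 * removal that leaves M at the head of vnode->writebacks flips it to
 * AFS_WBACK_COMPLETE and wakes the waiter - hence the wait condition
 * checking either the state change or the marker reaching the list head.
 */
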
/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
        _enter("");

        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;

        return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);

        _enter("{{%x:%u}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        /* wait for the page to be written to the cache before we allow it to
         * be modified */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, page);
#endif

        _leave(" = 0");
        return 0;
}