// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


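/**
 * gfs2_page_add_databufs - add buffers in a byte range to the transaction
 * @ip: The inode
 * @page: The page
 * @from: Offset within the page at which the range starts
 * @len: Length of the range in bytes
 *
 * Marks each buffer head that overlaps [@from, @from + @len) uptodate and
 * adds it to the current transaction via gfs2_trans_add_data().
 */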
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage should go ahead and write the page, or 0 if the
 * page has already been dealt with here (redirtied or invalidated) and
 * unlocked.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, since in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

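/*
 * gfs2_discard - Forget a buffer's journal bookkeeping
 *
 * Clears the buffer's dirty and mapping state and detaches it from any
 * journal list it may be on (via its gfs2_bufdata), so that the page it
 * belongs to can be invalidated without the buffer being written back.
 */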
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

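/*
 * gfs2_invalidatepage - Invalidate part or all of a page
 *
 * Discards the journal state of every buffer that lies entirely within the
 * invalidated byte range, and tries to release the page's buffers when the
 * whole page is being invalidated.
 */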
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

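/*
 * Three sets of address space operations are provided below and selected in
 * gfs2_set_aops(): one for writeback-mode data, one for ordered-mode data,
 * and one for inodes with journaled data (jdata).
 */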
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

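/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects the jdata, writeback, or ordered aops above based on whether the
 * inode has journaled data and on the filesystem's data journaling mode.
 */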
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else if (gfs2_is_writeback(sdp))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(sdp))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else
		BUG();
}