// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


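/**
 * gfs2_page_add_databufs - Mark a page's buffers as journaled data
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the first byte affected within the page
 * @len: Number of bytes affected
 *
 * Walks the buffer heads that overlap the range [@from, @from + @len),
 * marks each one uptodate and adds it to the current transaction as a
 * data buffer via gfs2_trans_add_data().
 */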
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}

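	/*
	 * The page is within i_size and no transaction is running here,
	 * so write it out directly, without attaching buffer heads.
	 */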
	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

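	/*
	 * A single transaction covers every data block in the pagevec,
	 * so nrblocks is sized for nr_pages full pages.
	 */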
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

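	/*
	 * Write the tagged pages back one pagevec at a time, each batch
	 * under its own transaction; for range_cyclic writeback, wrap
	 * around to the start of the file once.
	 */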
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

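	/* Copy the stuffed data out of the dinode and zero the rest of the page. */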
	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);

	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(gfs2_withdrawn(sdp)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

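/*
 * gfs2_discard - detach a buffer from the journal so that its page can be
 * invalidated: clear its dirty bit, drop it from any log list (or remove it
 * from the journal entirely), and clear its mapped/req/new state.
 */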
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared.  Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

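	/*
	 * Second pass: detach and free each buffer's gfs2_bufdata before
	 * letting try_to_free_buffers() release the buffers themselves.
	 */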
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}