// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

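/**
 * gfs2_page_add_databufs - Add journaled data buffers for a range of a page
 * @ip: The inode
 * @page: The page (must already have buffers attached)
 * @from: Offset of the start of the byte range within the page
 * @len: Length of the byte range
 *
 * Marks each buffer that overlaps the byte range as uptodate and adds it
 * to the current transaction as journaled data.
 */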
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

227/**
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100228 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
229 * @mapping: The mapping
230 * @wbc: The writeback control
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100231 * @pvec: The vector of pages
232 * @nr_pages: The number of pages to write
Fabian Frederick12725742015-05-05 13:29:54 -0500233 * @done_index: Page index
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100234 *
235 * Returns: non-zero if loop should terminate, zero otherwise
236 */
237
238static int gfs2_write_jdata_pagevec(struct address_space *mapping,
239 struct writeback_control *wbc,
240 struct pagevec *pvec,
Andreas Gruenbacher9aa01592017-11-27 10:54:55 -0600241 int nr_pages,
Steven Whitehouse774016b2014-02-06 15:47:47 +0000242 pgoff_t *done_index)
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100243{
244 struct inode *inode = mapping->host;
245 struct gfs2_sbd *sdp = GFS2_SB(inode);
Andreas Gruenbacher45eb0502019-09-02 17:31:06 +0100246 unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100247 int i;
248 int ret;
249
Abhijith Das20b95bf2008-03-06 17:43:52 -0600250 ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100251 if (ret < 0)
252 return ret;
253
254 for(i = 0; i < nr_pages; i++) {
255 struct page *page = pvec->pages[i];
256
Steven Whitehouse774016b2014-02-06 15:47:47 +0000257 *done_index = page->index;
258
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100259 lock_page(page);
260
261 if (unlikely(page->mapping != mapping)) {
Steven Whitehouse774016b2014-02-06 15:47:47 +0000262continue_unlock:
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100263 unlock_page(page);
264 continue;
265 }
266
Steven Whitehouse774016b2014-02-06 15:47:47 +0000267 if (!PageDirty(page)) {
268 /* someone wrote it for us */
269 goto continue_unlock;
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100270 }
271
Steven Whitehouse774016b2014-02-06 15:47:47 +0000272 if (PageWriteback(page)) {
273 if (wbc->sync_mode != WB_SYNC_NONE)
274 wait_on_page_writeback(page);
275 else
276 goto continue_unlock;
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100277 }
278
Steven Whitehouse774016b2014-02-06 15:47:47 +0000279 BUG_ON(PageWriteback(page));
280 if (!clear_page_dirty_for_io(page))
281 goto continue_unlock;
282
Christoph Hellwigde1414a2015-01-14 10:42:36 +0100283 trace_wbc_writepage(wbc, inode_to_bdi(inode));
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100284
285 ret = __gfs2_jdata_writepage(page, wbc);
Steven Whitehouse774016b2014-02-06 15:47:47 +0000286 if (unlikely(ret)) {
287 if (ret == AOP_WRITEPAGE_ACTIVATE) {
288 unlock_page(page);
289 ret = 0;
290 } else {
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100291
Steven Whitehouse774016b2014-02-06 15:47:47 +0000292 /*
293 * done_index is set past this page,
294 * so media errors will not choke
295 * background writeout for the entire
296 * file. This has consequences for
297 * range_cyclic semantics (ie. it may
298 * not be suitable for data integrity
299 * writeout).
300 */
301 *done_index = page->index + 1;
302 ret = 1;
303 break;
304 }
305 }
306
307 /*
308 * We stop writing back only if we are not doing
309 * integrity sync. In case of integrity sync we have to
310 * keep going until we have written all the pages
311 * we tagged for writeback prior to entering this loop.
312 */
313 if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100314 ret = 1;
Steven Whitehouse774016b2014-02-06 15:47:47 +0000315 break;
316 }
317
Steven Whitehouseb8e7cbb2007-10-17 09:04:24 +0100318 }
319 gfs2_trans_end(sdp);
320 return ret;
321}
322
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/* Don't copy past the end of the current page. */
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(gfs2_withdrawn(sdp)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

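/**
 * gfs2_discard - Discard a buffer and detach it from the journal
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Clears the buffer's dirty state and, if it has journal state attached,
 * either removes it from its bufdata list (when unpinned) or removes it
 * from the journal, before clearing its mapped, req and new bits.
 */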
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

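/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The (locked) page to invalidate
 * @offset: Start of the range to invalidate within the page
 * @length: Length of the range in bytes
 *
 * Any journaled buffers that lie entirely within the range are discarded.
 * When the whole page is invalidated, we also try to release it.
 */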
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

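/*
 * Address space operations used in writeback and ordered data modes.
 */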
static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

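/*
 * Address space operations used in journaled data (jdata) mode.
 */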
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

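/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 *
 * Selects between the jdata and regular address space operations, based
 * on whether the inode has journaled data enabled.
 */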
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}