/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

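/*
 * Map buffer flags to an allocation mask: readahead allocations may fail
 * cheaply (__GFP_NORETRY) since readahead is purely an optimisation, while
 * all other buffer allocations stay in GFP_NOFS context so that memory
 * reclaim cannot recurse back into the filesystem. __GFP_NOWARN suppresses
 * allocation failure warnings in either case.
 */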
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

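/*
 * Set up the buffer map array. Single-segment buffers use the map embedded
 * in the xfs_buf itself; multi-segment (discontiguous) buffers allocate a
 * separate array that is released again in xfs_buf_free_maps().
 */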
static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 *	Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

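/*
 * Allocate and initialise a struct xfs_buf and its map array. No backing
 * data pages are allocated here; callers attach memory afterwards, e.g.
 * through xfs_buf_allocate_memory() or xfs_buf_associate_memory().
 */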
struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned noio_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		noio_flag = memalloc_noio_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_noio_restore(noio_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Look up, and create if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno < 0 || blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

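/*
 * Issue the read I/O for a buffer that has already been set up. Asynchronous
 * reads return as soon as the I/O is submitted; synchronous reads wait for
 * completion and return the final I/O status.
 */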
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

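/*
 * Get a buffer for the given range and read it from disk if it is not
 * already up to date (XBF_DONE). The verifier ops are attached before the
 * read is issued so that completion can validate the buffer contents.
 *
 * Illustrative call only; the map and verifier names are examples, not a
 * required pattern:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, &xfs_sb_buf_ops);
 */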
xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!(bp->b_flags & XBF_DONE)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;	/* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

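/*
 * Point a buffer at caller-supplied memory instead of allocating pages for
 * it. The memory may be heap or vmalloc backed; it is broken up into the
 * page list so the normal I/O paths can operate on it unchanged.
 */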
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

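/*
 * Allocate a buffer that is not attached to any disk address and is never
 * inserted into the per-AG buffer cache. Intended for one-off I/O where
 * caching the buffer would be wrong or unnecessary (e.g. uncached reads of
 * superblock copies).
 */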
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			/*
			 * If the buffer is added to the LRU take a new
			 * reference to the buffer for the LRU and clear the
			 * (now stale) dispose list state flag
			 */
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			}
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			/*
			 * most of the time buffers will already be removed from
			 * the LRU, so optimise that case by checking for the
			 * XFS_BSTATE_DISPOSE flag indicating the last list the
			 * buffer was on was the disposal list
			 */
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
			} else {
				ASSERT(list_empty(&bp->b_lru));
			}
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

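/*
 * Wait for the buffer's pin count to drop to zero before it is written.
 * A pinned buffer still has changes in the in-memory log that have not yet
 * reached stable storage, so write submission sleeps here until the log
 * forces complete and unpin the buffer.
 */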
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

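/*
 * Complete I/O on a buffer: fold any bio error into b_error, run the read
 * verifier on successful reads, then either invoke the b_iodone callback,
 * release an async buffer, or wake the thread waiting on b_iowait.
 */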
void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
}

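/*
 * Write a buffer synchronously. On I/O failure the filesystem is shut down,
 * as callers of xfs_bwrite() expect the metadata to be stable on disk when
 * this returns successfully.
 */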
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

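/*
 * Per-bio completion handler. A buffer may be split across several bios, so
 * only the first error seen is recorded in b_io_error; when the last I/O
 * reference is dropped, final completion runs via xfs_buf_ioend_async().
 */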
static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_error)
		cmpxchg(&bp->b_io_error, 0, bio->bi_error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

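/*
 * Build and submit the bios for a single map segment of the buffer. Large
 * segments may need several bios; b_io_remaining tracks the bios still in
 * flight. *buf_offset and *count are advanced so the caller can issue the
 * remaining segments.
 */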
Dave Chinner3e85c862012-06-22 18:50:09 +10001126static void
1127xfs_buf_ioapply_map(
1128 struct xfs_buf *bp,
1129 int map,
1130 int *buf_offset,
1131 int *count,
1132 int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133{
Dave Chinner3e85c862012-06-22 18:50:09 +10001134 int page_index;
1135 int total_nr_pages = bp->b_page_count;
1136 int nr_pages;
1137 struct bio *bio;
1138 sector_t sector = bp->b_maps[map].bm_bn;
1139 int size;
1140 int offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
Nathan Scottce8e9222006-01-11 15:39:08 +11001142 total_nr_pages = bp->b_page_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143
Dave Chinner3e85c862012-06-22 18:50:09 +10001144 /* skip the pages in the buffer before the start offset */
1145 page_index = 0;
1146 offset = *buf_offset;
1147 while (offset >= PAGE_SIZE) {
1148 page_index++;
1149 offset -= PAGE_SIZE;
Christoph Hellwigf538d4d2005-11-02 10:26:59 +11001150 }
1151
Dave Chinner3e85c862012-06-22 18:50:09 +10001152 /*
1153 * Limit the IO size to the length of the current vector, and update the
1154 * remaining IO count for the next time around.
1155 */
1156 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1157 *count -= size;
1158 *buf_offset += size;
Christoph Hellwig34951f52011-07-26 15:06:44 +00001159
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160next_chunk:
Nathan Scottce8e9222006-01-11 15:39:08 +11001161 atomic_inc(&bp->b_io_remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1163 if (nr_pages > total_nr_pages)
1164 nr_pages = total_nr_pages;
1165
1166 bio = bio_alloc(GFP_NOIO, nr_pages);
Nathan Scottce8e9222006-01-11 15:39:08 +11001167 bio->bi_bdev = bp->b_target->bt_bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001168 bio->bi_iter.bi_sector = sector;
Nathan Scottce8e9222006-01-11 15:39:08 +11001169 bio->bi_end_io = xfs_buf_bio_end_io;
1170 bio->bi_private = bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
Dave Chinner0e6e8472011-03-26 09:16:45 +11001172
Dave Chinner3e85c862012-06-22 18:50:09 +10001173 for (; size && nr_pages; nr_pages--, page_index++) {
Dave Chinner0e6e8472011-03-26 09:16:45 +11001174 int rbytes, nbytes = PAGE_SIZE - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
1176 if (nbytes > size)
1177 nbytes = size;
1178
Dave Chinner3e85c862012-06-22 18:50:09 +10001179 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1180 offset);
Nathan Scottce8e9222006-01-11 15:39:08 +11001181 if (rbytes < nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 break;
1183
1184 offset = 0;
Dave Chinneraa0e8832012-04-23 15:58:52 +10001185 sector += BTOBB(nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 size -= nbytes;
1187 total_nr_pages--;
1188 }
1189
Kent Overstreet4f024f32013-10-11 15:44:27 -07001190 if (likely(bio->bi_iter.bi_size)) {
James Bottomley73c77e22010-01-25 11:42:24 -06001191 if (xfs_buf_is_vmapped(bp)) {
1192 flush_kernel_vmap_range(bp->b_addr,
1193 xfs_buf_vmap_len(bp));
1194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 submit_bio(rw, bio);
1196 if (size)
1197 goto next_chunk;
1198 } else {
Dave Chinner37eb17e2012-11-12 22:09:46 +11001199 /*
1200	 * This is guaranteed not to be the last IO reference count
Dave Chinner595bff72014-10-02 09:05:14 +10001201 * because the caller (xfs_buf_submit) holds a count itself.
Dave Chinner37eb17e2012-11-12 22:09:46 +11001202 */
1203 atomic_dec(&bp->b_io_remaining);
Dave Chinner24513372014-06-25 14:58:08 +10001204 xfs_buf_ioerror(bp, -EIO);
Dave Chinnerec53d1d2010-07-20 17:52:59 +10001205 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001207
1208}
1209
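/*
 * Translate the buffer flags into block layer request flags, run the write
 * verifier for write requests, and then issue I/O on all of the buffer's
 * maps under a single plug.
 */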
1210STATIC void
1211_xfs_buf_ioapply(
1212 struct xfs_buf *bp)
1213{
1214 struct blk_plug plug;
1215 int rw;
1216 int offset;
1217 int size;
1218 int i;
1219
Dave Chinnerc163f9a2013-03-12 23:30:34 +11001220 /*
1221 * Make sure we capture only current IO errors rather than stale errors
1222 * left over from previous use of the buffer (e.g. failed readahead).
1223 */
1224 bp->b_error = 0;
1225
Brian Fosterb29c70f2014-12-04 09:43:17 +11001226 /*
1227	 * Fall back to the default I/O completion workqueue if the submitter
1228	 * has not already specified a custom one.
1229 */
1230 if (!bp->b_ioend_wq)
1231 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1232
Dave Chinner3e85c862012-06-22 18:50:09 +10001233 if (bp->b_flags & XBF_WRITE) {
1234 if (bp->b_flags & XBF_SYNCIO)
1235 rw = WRITE_SYNC;
1236 else
1237 rw = WRITE;
1238 if (bp->b_flags & XBF_FUA)
1239 rw |= REQ_FUA;
1240 if (bp->b_flags & XBF_FLUSH)
1241 rw |= REQ_FLUSH;
Dave Chinner1813dd62012-11-14 17:54:40 +11001242
1243 /*
1244 * Run the write verifier callback function if it exists. If
1245 * this function fails it will mark the buffer with an error and
1246 * the IO should not be dispatched.
1247 */
1248 if (bp->b_ops) {
1249 bp->b_ops->verify_write(bp);
1250 if (bp->b_error) {
1251 xfs_force_shutdown(bp->b_target->bt_mount,
1252 SHUTDOWN_CORRUPT_INCORE);
1253 return;
1254 }
Dave Chinner400b9d82014-08-04 12:42:40 +10001255 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1256 struct xfs_mount *mp = bp->b_target->bt_mount;
1257
1258 /*
1259 * non-crc filesystems don't attach verifiers during
1260 * log recovery, so don't warn for such filesystems.
1261 */
1262 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1263 xfs_warn(mp,
1264 "%s: no ops on block 0x%llx/0x%x",
1265 __func__, bp->b_bn, bp->b_length);
1266 xfs_hex_dump(bp->b_addr, 64);
1267 dump_stack();
1268 }
Dave Chinner1813dd62012-11-14 17:54:40 +11001269 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001270 } else if (bp->b_flags & XBF_READ_AHEAD) {
1271 rw = READA;
1272 } else {
1273 rw = READ;
1274 }
1275
1276 /* we only use the buffer cache for meta-data */
1277 rw |= REQ_META;
1278
1279 /*
1280 * Walk all the vectors issuing IO on them. Set up the initial offset
1281 * into the buffer and the desired IO size before we start -
1282	 * xfs_buf_ioapply_map() will modify them appropriately for each
1283 * subsequent call.
1284 */
1285 offset = bp->b_offset;
1286 size = BBTOB(bp->b_io_length);
1287 blk_start_plug(&plug);
1288 for (i = 0; i < bp->b_map_count; i++) {
1289 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1290 if (bp->b_error)
1291 break;
1292 if (size <= 0)
1293 break; /* all done */
1294 }
1295 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296}
1297
Dave Chinner595bff72014-10-02 09:05:14 +10001298/*
1299 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1300 * the current reference to the IO. It is not safe to reference the buffer after
1301 * a call to this function unless the caller holds an additional reference
1302 * itself.
1303 */
Dave Chinner0e95f192012-04-23 15:58:46 +10001304void
Dave Chinner595bff72014-10-02 09:05:14 +10001305xfs_buf_submit(
1306 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
Dave Chinner595bff72014-10-02 09:05:14 +10001308 trace_xfs_buf_submit(bp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001310 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
Dave Chinner595bff72014-10-02 09:05:14 +10001311 ASSERT(bp->b_flags & XBF_ASYNC);
1312
1313 /* on shutdown we stale and complete the buffer immediately */
1314 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1315 xfs_buf_ioerror(bp, -EIO);
1316 bp->b_flags &= ~XBF_DONE;
1317 xfs_buf_stale(bp);
1318 xfs_buf_ioend(bp);
1319 return;
1320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Christoph Hellwig375ec692011-08-23 08:28:03 +00001322 if (bp->b_flags & XBF_WRITE)
Nathan Scottce8e9222006-01-11 15:39:08 +11001323 xfs_buf_wait_unpin(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Dave Chinner61be9c52014-10-02 09:04:31 +10001325 /* clear the internal error state to avoid spurious errors */
1326 bp->b_io_error = 0;
1327
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001328 /*
Dave Chinner595bff72014-10-02 09:05:14 +10001329 * The caller's reference is released during I/O completion.
1330 * This occurs some time after the last b_io_remaining reference is
1331	 * released, so after we drop our IO reference we must hold some
1332 * other reference to ensure the buffer doesn't go away from underneath
1333 * us. Take a direct reference to ensure we have safe access to the
1334 * buffer until we are finished with it.
Dave Chinnere11bb802014-10-02 09:04:11 +10001335 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 xfs_buf_hold(bp);
1337
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001338 /*
Dave Chinnere11bb802014-10-02 09:04:11 +10001339	 * Set the count to 1 initially, so that an I/O completion
1340	 * callout that happens before we have started all the I/O cannot call
1341	 * xfs_buf_ioend too early.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001343 atomic_set(&bp->b_io_remaining, 1);
1344 _xfs_buf_ioapply(bp);
Dave Chinnere11bb802014-10-02 09:04:11 +10001345
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001346 /*
Dave Chinner595bff72014-10-02 09:05:14 +10001347 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1348 * reference we took above. If we drop it to zero, run completion so
1349 * that we don't return to the caller with completion still pending.
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001350 */
Dave Chinnere8aaba92014-10-02 09:04:22 +10001351 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
Dave Chinner595bff72014-10-02 09:05:14 +10001352 if (bp->b_error)
Dave Chinnere8aaba92014-10-02 09:04:22 +10001353 xfs_buf_ioend(bp);
1354 else
1355 xfs_buf_ioend_async(bp);
1356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
Nathan Scottce8e9222006-01-11 15:39:08 +11001358 xfs_buf_rele(bp);
Dave Chinner595bff72014-10-02 09:05:14 +10001359 /* Note: it is not safe to reference bp now we've dropped our ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360}
1361
1362/*
Dave Chinner595bff72014-10-02 09:05:14 +10001363 * Synchronous buffer IO submission path, read or write.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 */
1365int
Dave Chinner595bff72014-10-02 09:05:14 +10001366xfs_buf_submit_wait(
1367 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368{
Dave Chinner595bff72014-10-02 09:05:14 +10001369 int error;
1370
1371 trace_xfs_buf_submit_wait(bp, _RET_IP_);
1372
1373 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1374
1375 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1376 xfs_buf_ioerror(bp, -EIO);
1377 xfs_buf_stale(bp);
1378 bp->b_flags &= ~XBF_DONE;
1379 return -EIO;
1380 }
1381
1382 if (bp->b_flags & XBF_WRITE)
1383 xfs_buf_wait_unpin(bp);
1384
1385 /* clear the internal error state to avoid spurious errors */
1386 bp->b_io_error = 0;
1387
1388 /*
1389	 * For synchronous IO, the IO does not inherit the submitter's reference
1390 * count, nor the buffer lock. Hence we cannot release the reference we
1391 * are about to take until we've waited for all IO completion to occur,
1392 * including any xfs_buf_ioend_async() work that may be pending.
1393 */
1394 xfs_buf_hold(bp);
1395
1396 /*
1397	 * Set the count to 1 initially, so that an I/O completion
1398	 * callout that happens before we have started all the I/O cannot call
1399	 * xfs_buf_ioend too early.
1400 */
1401 atomic_set(&bp->b_io_remaining, 1);
1402 _xfs_buf_ioapply(bp);
1403
1404 /*
1405 * make sure we run completion synchronously if it raced with us and is
1406 * already complete.
1407 */
1408 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1409 xfs_buf_ioend(bp);
1410
1411 /* wait for completion before gathering the error from the buffer */
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001412 trace_xfs_buf_iowait(bp, _RET_IP_);
Dave Chinner595bff72014-10-02 09:05:14 +10001413 wait_for_completion(&bp->b_iowait);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001414 trace_xfs_buf_iowait_done(bp, _RET_IP_);
Dave Chinner595bff72014-10-02 09:05:14 +10001415 error = bp->b_error;
1416
1417 /*
1418 * all done now, we can release the hold that keeps the buffer
1419 * referenced for the entire IO.
1420 */
1421 xfs_buf_rele(bp);
1422 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423}
1424
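/*
 * Return a kernel virtual address for the given byte offset into the buffer,
 * whether the buffer is contiguously mapped or backed by an array of pages.
 */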
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001425void *
Nathan Scottce8e9222006-01-11 15:39:08 +11001426xfs_buf_offset(
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001427 struct xfs_buf *bp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 size_t offset)
1429{
1430 struct page *page;
1431
Dave Chinner611c9942012-04-23 15:59:07 +10001432 if (bp->b_addr)
Chandra Seetharaman62926042011-07-22 23:40:15 +00001433 return bp->b_addr + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434
Nathan Scottce8e9222006-01-11 15:39:08 +11001435 offset += bp->b_offset;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001436 page = bp->b_pages[offset >> PAGE_SHIFT];
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001437 return page_address(page) + (offset & (PAGE_SIZE-1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438}
1439
1440/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 * Move data into or out of a buffer.
1442 */
1443void
Nathan Scottce8e9222006-01-11 15:39:08 +11001444xfs_buf_iomove(
1445 xfs_buf_t *bp, /* buffer to process */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 size_t boff, /* starting buffer offset */
1447 size_t bsize, /* length to copy */
Dave Chinnerb9c48642010-01-20 10:47:39 +11001448 void *data, /* data address */
Nathan Scottce8e9222006-01-11 15:39:08 +11001449 xfs_buf_rw_t mode) /* read/write/zero flag */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450{
Dave Chinner795cac72012-04-23 15:58:53 +10001451 size_t bend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 bend = boff + bsize;
1454 while (boff < bend) {
Dave Chinner795cac72012-04-23 15:58:53 +10001455 struct page *page;
1456 int page_index, page_offset, csize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
Dave Chinner795cac72012-04-23 15:58:53 +10001458 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1459 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1460 page = bp->b_pages[page_index];
1461 csize = min_t(size_t, PAGE_SIZE - page_offset,
1462 BBTOB(bp->b_io_length) - boff);
1463
1464 ASSERT((csize + page_offset) <= PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
1466 switch (mode) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001467 case XBRW_ZERO:
Dave Chinner795cac72012-04-23 15:58:53 +10001468 memset(page_address(page) + page_offset, 0, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001470 case XBRW_READ:
Dave Chinner795cac72012-04-23 15:58:53 +10001471 memcpy(data, page_address(page) + page_offset, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001473 case XBRW_WRITE:
Dave Chinner795cac72012-04-23 15:58:53 +10001474 memcpy(page_address(page) + page_offset, data, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 }
1476
1477 boff += csize;
1478 data += csize;
1479 }
1480}
1481
1482/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001483 * Handling of buffer targets (buftargs).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 */
1485
1486/*
Dave Chinner430cbeb2010-12-02 16:30:55 +11001487 * Wait for any bufs with callbacks that have been submitted but have not yet
1488 * returned. These buffers will have an elevated hold count, so wait on those
1489 * while freeing all the buffers only held by the LRU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001491static enum lru_status
1492xfs_buftarg_wait_rele(
1493 struct list_head *item,
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001494 struct list_lru_one *lru,
Dave Chinnere80dfa12013-08-28 10:18:05 +10001495 spinlock_t *lru_lock,
1496 void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Dave Chinnere80dfa12013-08-28 10:18:05 +10001498{
1499 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
Dave Chinnera4082352013-08-28 10:18:06 +10001500 struct list_head *dispose = arg;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001501
1502 if (atomic_read(&bp->b_hold) > 1) {
Dave Chinnera4082352013-08-28 10:18:06 +10001503 /* need to wait, so skip it this pass */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001504 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
Dave Chinnera4082352013-08-28 10:18:06 +10001505 return LRU_SKIP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 }
Dave Chinnera4082352013-08-28 10:18:06 +10001507 if (!spin_trylock(&bp->b_lock))
1508 return LRU_SKIP;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001509
Dave Chinnera4082352013-08-28 10:18:06 +10001510 /*
1511 * clear the LRU reference count so the buffer doesn't get
1512 * ignored in xfs_buf_rele().
1513 */
1514 atomic_set(&bp->b_lru_ref, 0);
1515 bp->b_state |= XFS_BSTATE_DISPOSE;
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001516 list_lru_isolate_move(lru, item, dispose);
Dave Chinnera4082352013-08-28 10:18:06 +10001517 spin_unlock(&bp->b_lock);
1518 return LRU_REMOVED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519}
1520
Dave Chinnere80dfa12013-08-28 10:18:05 +10001521void
1522xfs_wait_buftarg(
1523 struct xfs_buftarg *btp)
1524{
Dave Chinnera4082352013-08-28 10:18:06 +10001525 LIST_HEAD(dispose);
1526 int loop = 0;
1527
Dave Chinner85bec542016-01-19 08:28:10 +11001528 /*
1529 * We need to flush the buffer workqueue to ensure that all IO
1530 * completion processing is 100% done. Just waiting on buffer locks is
1531 * not sufficient for async IO as the reference count held over IO is
1532 * not released until after the buffer lock is dropped. Hence we need to
1533 * ensure here that all reference counts have been dropped before we
1534 * start walking the LRU list.
1535 */
1536 drain_workqueue(btp->bt_mount->m_buf_workqueue);
1537
Dave Chinnera4082352013-08-28 10:18:06 +10001538 /* loop until there is nothing left on the lru list. */
1539 while (list_lru_count(&btp->bt_lru)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001540 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
Dave Chinnera4082352013-08-28 10:18:06 +10001541 &dispose, LONG_MAX);
1542
1543 while (!list_empty(&dispose)) {
1544 struct xfs_buf *bp;
1545 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1546 list_del_init(&bp->b_lru);
Dave Chinnerac8809f2013-12-12 16:34:38 +11001547 if (bp->b_flags & XBF_WRITE_FAIL) {
1548 xfs_alert(btp->bt_mount,
Joe Perchesf41febd2015-07-29 11:52:04 +10001549"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
Dave Chinnerac8809f2013-12-12 16:34:38 +11001550 (long long)bp->b_bn);
Joe Perchesf41febd2015-07-29 11:52:04 +10001551 xfs_alert(btp->bt_mount,
1552"Please run xfs_repair to determine the extent of the problem.");
Dave Chinnerac8809f2013-12-12 16:34:38 +11001553 }
Dave Chinnera4082352013-08-28 10:18:06 +10001554 xfs_buf_rele(bp);
1555 }
1556 if (loop++ != 0)
1557 delay(100);
1558 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001559}
1560
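/*
 * LRU walk callback used by the shrinker. Decrement the buffer's LRU
 * reference count and either rotate it back onto the LRU or, once the count
 * has run out, isolate it onto the dispose list for freeing.
 */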
1561static enum lru_status
1562xfs_buftarg_isolate(
1563 struct list_head *item,
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001564 struct list_lru_one *lru,
Dave Chinnere80dfa12013-08-28 10:18:05 +10001565 spinlock_t *lru_lock,
1566 void *arg)
1567{
1568 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1569 struct list_head *dispose = arg;
1570
1571 /*
Dave Chinnera4082352013-08-28 10:18:06 +10001572 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1573 * If we fail to get the lock, just skip it.
1574 */
1575 if (!spin_trylock(&bp->b_lock))
1576 return LRU_SKIP;
1577 /*
Dave Chinnere80dfa12013-08-28 10:18:05 +10001578 * Decrement the b_lru_ref count unless the value is already
1579 * zero. If the value is already zero, we need to reclaim the
1580 * buffer, otherwise it gets another trip through the LRU.
1581 */
Dave Chinnera4082352013-08-28 10:18:06 +10001582 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1583 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001584 return LRU_ROTATE;
Dave Chinnera4082352013-08-28 10:18:06 +10001585 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001586
Dave Chinnera4082352013-08-28 10:18:06 +10001587 bp->b_state |= XFS_BSTATE_DISPOSE;
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001588 list_lru_isolate_move(lru, item, dispose);
Dave Chinnera4082352013-08-28 10:18:06 +10001589 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001590 return LRU_REMOVED;
1591}
1592
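/*
 * Shrinker scan callback. Walk the buftarg LRU isolating reclaimable buffers
 * onto a local dispose list, then release each isolated buffer. Returns the
 * number of buffers removed from the LRU.
 */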
Andrew Mortonaddbda42013-08-28 10:18:06 +10001593static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001594xfs_buftarg_shrink_scan(
Dave Chinnerff57ab22010-11-30 17:27:57 +11001595 struct shrinker *shrink,
Ying Han1495f232011-05-24 17:12:27 -07001596 struct shrink_control *sc)
David Chinnera6867a62006-01-11 15:37:58 +11001597{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001598 struct xfs_buftarg *btp = container_of(shrink,
1599 struct xfs_buftarg, bt_shrinker);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001600 LIST_HEAD(dispose);
Andrew Mortonaddbda42013-08-28 10:18:06 +10001601 unsigned long freed;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001602
Vladimir Davydov503c3582015-02-12 14:58:47 -08001603 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1604 xfs_buftarg_isolate, &dispose);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001605
1606 while (!list_empty(&dispose)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001607 struct xfs_buf *bp;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001608 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1609 list_del_init(&bp->b_lru);
1610 xfs_buf_rele(bp);
1611 }
1612
Dave Chinnere80dfa12013-08-28 10:18:05 +10001613 return freed;
1614}
1615
Andrew Mortonaddbda42013-08-28 10:18:06 +10001616static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001617xfs_buftarg_shrink_count(
1618 struct shrinker *shrink,
1619 struct shrink_control *sc)
1620{
1621 struct xfs_buftarg *btp = container_of(shrink,
1622 struct xfs_buftarg, bt_shrinker);
Vladimir Davydov503c3582015-02-12 14:58:47 -08001623 return list_lru_shrink_count(&btp->bt_lru, sc);
David Chinnera6867a62006-01-11 15:37:58 +11001624}
1625
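/*
 * Tear down a buffer target: unregister its shrinker, destroy the LRU, flush
 * the underlying device if write barriers are enabled, and free the
 * structure.
 */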
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626void
1627xfs_free_buftarg(
Christoph Hellwigb7963132009-03-03 14:48:37 -05001628 struct xfs_mount *mp,
1629 struct xfs_buftarg *btp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001631 unregister_shrinker(&btp->bt_shrinker);
Glauber Costaf5e1dd32013-08-28 10:18:18 +10001632 list_lru_destroy(&btp->bt_lru);
Dave Chinnerff57ab22010-11-30 17:27:57 +11001633
Christoph Hellwigb7963132009-03-03 14:48:37 -05001634 if (mp->m_flags & XFS_MOUNT_BARRIER)
1635 xfs_blkdev_issue_flush(btp);
David Chinnera6867a62006-01-11 15:37:58 +11001636
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001637 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638}
1639
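/*
 * Set the sector size used for metadata I/O on this buffer target and
 * propagate it to the underlying block device.
 */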
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001640int
1641xfs_setsize_buftarg(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 xfs_buftarg_t *btp,
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001643 unsigned int sectorsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644{
Eric Sandeen7c71ee72014-01-21 16:46:23 -06001645 /* Set up metadata sector size info */
Eric Sandeen6da54172014-01-21 16:45:52 -06001646 btp->bt_meta_sectorsize = sectorsize;
1647 btp->bt_meta_sectormask = sectorsize - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Nathan Scottce8e9222006-01-11 15:39:08 +11001649 if (set_blocksize(btp->bt_bdev, sectorsize)) {
Dave Chinner4f107002011-03-07 10:00:35 +11001650 xfs_warn(btp->bt_mount,
Dmitry Monakhova1c6f0572015-04-13 16:31:37 +04001651 "Cannot set_blocksize to %u on device %pg",
1652 sectorsize, btp->bt_bdev);
Dave Chinner24513372014-06-25 14:58:08 +10001653 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 }
1655
Eric Sandeen7c71ee72014-01-21 16:46:23 -06001656 /* Set up device logical sector size mask */
1657 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1658 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 return 0;
1661}
1662
1663/*
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001664 * When allocating the initial buffer target we have not yet
1665 * read in the superblock, so we don't know what size sectors
1666 * are being used at this early stage. Play safe.
Nathan Scottce8e9222006-01-11 15:39:08 +11001667 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668STATIC int
1669xfs_setsize_buftarg_early(
1670 xfs_buftarg_t *btp,
1671 struct block_device *bdev)
1672{
Eric Sandeena96c4152014-04-14 19:00:29 +10001673 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674}
1675
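/*
 * Allocate and initialise a buffer target for the given block device,
 * setting up the sector sizes, the buffer LRU and the shrinker that reclaims
 * buffers from it.
 */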
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676xfs_buftarg_t *
1677xfs_alloc_buftarg(
Dave Chinnerebad8612010-09-22 10:47:20 +10001678 struct xfs_mount *mp,
Eric Sandeen34dcefd2014-04-14 19:01:00 +10001679 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680{
1681 xfs_buftarg_t *btp;
1682
Dave Chinnerb17cb362013-05-20 09:51:12 +10001683 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
Dave Chinnerebad8612010-09-22 10:47:20 +10001685 btp->bt_mount = mp;
Nathan Scottce8e9222006-01-11 15:39:08 +11001686 btp->bt_dev = bdev->bd_dev;
1687 btp->bt_bdev = bdev;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001688 btp->bt_bdi = blk_get_backing_dev_info(bdev);
Dave Chinner0e6e8472011-03-26 09:16:45 +11001689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 if (xfs_setsize_buftarg_early(btp, bdev))
1691 goto error;
Glauber Costa5ca302c2013-08-28 10:18:18 +10001692
1693 if (list_lru_init(&btp->bt_lru))
1694 goto error;
1695
Dave Chinnere80dfa12013-08-28 10:18:05 +10001696 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1697 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001698 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001699 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001700 register_shrinker(&btp->bt_shrinker);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return btp;
1702
1703error:
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001704 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 return NULL;
1706}
1707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708/*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001709 * Add a buffer to the delayed write list.
1710 *
1711 * This queues a buffer for writeout if it has not already been queued. Note that
1712 * neither this routine nor the buffer list submission functions perform
1713 * any internal synchronization. It is expected that the lists are thread-local
1714 * to the callers.
1715 *
1716 * Returns true if we queued up the buffer, or false if it already had
1717 * been on the buffer list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001719bool
Nathan Scottce8e9222006-01-11 15:39:08 +11001720xfs_buf_delwri_queue(
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001721 struct xfs_buf *bp,
1722 struct list_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001724 ASSERT(xfs_buf_islocked(bp));
1725 ASSERT(!(bp->b_flags & XBF_READ));
1726
1727 /*
1728	 * If the buffer is already marked delwri it has already been queued up
1729	 * by someone else for immediate writeout. Just ignore it in that
1730 * case.
1731 */
1732 if (bp->b_flags & _XBF_DELWRI_Q) {
1733 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1734 return false;
1735 }
David Chinnera6867a62006-01-11 15:37:58 +11001736
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001737 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1738
Dave Chinnerd808f612010-02-02 10:13:42 +11001739 /*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001740 * If a buffer gets written out synchronously or marked stale while it
1741 * is on a delwri list we lazily remove it. To do this, the other party
1742 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1743 * It remains referenced and on the list. In a rare corner case it
1744	 * might get re-added to a delwri list after the synchronous writeout, in
1745	 * which case we just need to re-add the flag here.
Dave Chinnerd808f612010-02-02 10:13:42 +11001746 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001747 bp->b_flags |= _XBF_DELWRI_Q;
1748 if (list_empty(&bp->b_list)) {
1749 atomic_inc(&bp->b_hold);
1750 list_add_tail(&bp->b_list, list);
David Chinner585e6d82007-02-10 18:32:29 +11001751 }
David Chinner585e6d82007-02-10 18:32:29 +11001752
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001753 return true;
David Chinner585e6d82007-02-10 18:32:29 +11001754}
1755
Dave Chinner089716a2010-01-26 15:13:25 +11001756/*
1757 * The compare function is more complex than it needs to be because
1758 * the return value is only 32 bits and we are doing comparisons
1759 * on 64 bit values.
1760 */
1761static int
1762xfs_buf_cmp(
1763 void *priv,
1764 struct list_head *a,
1765 struct list_head *b)
1766{
1767 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1768 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1769 xfs_daddr_t diff;
1770
Mark Tinguelyf4b42422012-12-04 17:18:02 -06001771 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
Dave Chinner089716a2010-01-26 15:13:25 +11001772 if (diff < 0)
1773 return -1;
1774 if (diff > 0)
1775 return 1;
1776 return 0;
1777}
1778
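/*
 * Common delwri list submission helper. Lock (or trylock) each queued buffer,
 * move the ones still marked _XBF_DELWRI_Q onto @io_list, sort them by disk
 * address and submit them as asynchronous writes under a single plug. In the
 * non-blocking case the number of pinned buffers skipped is returned.
 */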
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001779static int
1780__xfs_buf_delwri_submit(
1781 struct list_head *buffer_list,
1782 struct list_head *io_list,
1783 bool wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001785 struct blk_plug plug;
1786 struct xfs_buf *bp, *n;
1787 int pinned = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001789 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1790 if (!wait) {
1791 if (xfs_buf_ispinned(bp)) {
1792 pinned++;
1793 continue;
1794 }
1795 if (!xfs_buf_trylock(bp))
1796 continue;
1797 } else {
1798 xfs_buf_lock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001801 /*
1802 * Someone else might have written the buffer synchronously or
1803 * marked it stale in the meantime. In that case only the
1804 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1805 * reference and remove it from the list here.
1806 */
1807 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1808 list_del_init(&bp->b_list);
1809 xfs_buf_relse(bp);
1810 continue;
1811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001813 list_move_tail(&bp->b_list, io_list);
1814 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1815 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001817 list_sort(NULL, io_list, xfs_buf_cmp);
Christoph Hellwiga1b7ea52011-03-30 11:05:09 +00001818
1819 blk_start_plug(&plug);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001820 list_for_each_entry_safe(bp, n, io_list, b_list) {
Dave Chinnerac8809f2013-12-12 16:34:38 +11001821 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
Dave Chinnercf53e992014-10-02 09:04:01 +10001822 bp->b_flags |= XBF_WRITE | XBF_ASYNC;
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001823
Dave Chinnercf53e992014-10-02 09:04:01 +10001824 /*
1825	 * We do all IO submission async. This means if we need to wait
1826 * for IO completion we need to take an extra reference so the
1827 * buffer is still valid on the other side.
1828 */
1829 if (wait)
1830 xfs_buf_hold(bp);
1831 else
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001832 list_del_init(&bp->b_list);
Dave Chinner8dac3922014-10-02 09:04:40 +10001833
Dave Chinner595bff72014-10-02 09:05:14 +10001834 xfs_buf_submit(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 }
Christoph Hellwiga1b7ea52011-03-30 11:05:09 +00001836 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001838 return pinned;
1839}
Nathan Scottf07c2252006-09-28 10:52:15 +10001840
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001841/*
1842 * Write out a buffer list asynchronously.
1843 *
1844 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1845 * out and not wait for I/O completion on any of the buffers. This interface
1846 * is only safely usable for callers that can track I/O completion by higher
1847 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1848 * function.
1849 */
1850int
1851xfs_buf_delwri_submit_nowait(
1852 struct list_head *buffer_list)
1853{
1854 LIST_HEAD (io_list);
1855 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1856}
1857
1858/*
1859 * Write out a buffer list synchronously.
1860 *
1861 * This will take the @buffer_list, write all buffers out and wait for I/O
1862 * completion on all of the buffers. @buffer_list is consumed by the function,
1863 * so callers must have some other way of tracking buffers if they require such
1864 * functionality.
1865 */
1866int
1867xfs_buf_delwri_submit(
1868 struct list_head *buffer_list)
1869{
1870 LIST_HEAD (io_list);
1871 int error = 0, error2;
1872 struct xfs_buf *bp;
1873
1874 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1875
1876 /* Wait for IO to complete. */
1877 while (!list_empty(&io_list)) {
1878 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1879
1880 list_del_init(&bp->b_list);
Dave Chinnercf53e992014-10-02 09:04:01 +10001881
1882 /* locking the buffer will wait for async IO completion. */
1883 xfs_buf_lock(bp);
1884 error2 = bp->b_error;
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001885 xfs_buf_relse(bp);
1886 if (!error)
1887 error = error2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 }
1889
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001890 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891}
1892
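/*
 * One-time initialisation: create the kmem zone used to allocate xfs_buf
 * structures.
 */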
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001893int __init
Nathan Scottce8e9222006-01-11 15:39:08 +11001894xfs_buf_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895{
Nathan Scott87582802006-03-14 13:18:19 +11001896 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1897 KM_ZONE_HWALIGN, NULL);
Nathan Scottce8e9222006-01-11 15:39:08 +11001898 if (!xfs_buf_zone)
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001899 goto out;
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001900
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001901 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001903 out:
Nathan Scott87582802006-03-14 13:18:19 +11001904 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905}
1906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907void
Nathan Scottce8e9222006-01-11 15:39:08 +11001908xfs_buf_terminate(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909{
Nathan Scottce8e9222006-01-11 15:39:08 +11001910 kmem_zone_destroy(xfs_buf_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911}