/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
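
/*
 * Worked example (illustrative): with 4KB pages PAGE_CACHE_SHIFT is 12,
 * so MIN_WRITEBACK_PAGES = 4096UL >> (12 - 10) = 1024 pages, i.e. 4MB.
 */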

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;
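
/*
 * Worked example (illustrative): with the 12 hour default, a timestamp
 * update made just after an expiration sweep is not treated as expired
 * until 12 hours later, and the sweep that finally writes it back may
 * itself run up to 12 hours after that - hence the 24 hour worst case
 * noted above.
 */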

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}

/**
 * inode_wb_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io}
 *
 * Move @inode->i_wb_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_wb_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_wb_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_wb_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_wb_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_wb_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb->bdi, work);

	spin_lock_bh(&wb->work_lock);
	if (!test_bit(WB_registered, &wb->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &wb->work_list);
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
out_unlock:
	spin_unlock_bh(&wb->work_lock);
}

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	if (inode) {
		struct bdi_writeback *wb = inode_to_wb(inode);
		if (wb)
			return wb_congested(wb, cong_bits);
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);
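
/*
 * Usage sketch (illustrative, not part of this file): a read-side path
 * that wants to back off while the inode's wb is congested could test
 *
 *	if (inode_congested(inode, 1 << WB_sync_congested))
 *		return 0;
 *
 * where WB_sync_congested is assumed to be the congestion bit declared in
 * backing-dev-defs.h for this kernel.
 */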

/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 * @wb->bdi.
 */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/*
	 * This may be called on clean wb's and proportional distribution
	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we wanna err on the side of writing more.
	 */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;
	else
		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
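
/*
 * Worked example (illustrative): nr_pages = 1024, this_bw = 30 and
 * tot_bw = 100 (same units) gives DIV_ROUND_UP_ULL(1024 * 30, 100) = 308
 * pages for this wb; rounding up rather than down errs on the side of
 * writing more, matching the comment above.
 */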

#else	/* CONFIG_CGROUP_WRITEBACK */

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	return nr_pages;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	if (!wb_has_dirty_io(wb))
		return;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(wb->bdi);
		wb_wakeup(wb);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	wb_queue_work(wb, work);
}

/**
 * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given wb
 *   some IO is happening if we are over the background dirty threshold.
 *   The caller need not hold the sb s_umount semaphore.
 */
void wb_start_background_writeback(struct bdi_writeback *wb)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(wb->bdi);
	wb_wakeup(wb);
}
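
/*
 * Usage sketch (illustrative, not part of this file): a dirtying path that
 * finds the device over its background threshold can nudge the flusher
 * without blocking:
 *
 *	if (over_bground_thresh(wb))
 *		wb_start_background_writeback(wb);
 *
 * over_bground_thresh() is defined later in this file; the real callers in
 * mm/page-writeback.c use their own threshold bookkeeping.
 */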

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_wb_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_wb_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
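
/*
 * Illustrative numbers (assuming HZ=1000): on 32-bit, jiffies wraps about
 * every 2^32 / 1000 seconds, roughly 49.7 days. Past a wrap, a stale
 * dirtied_when can "appear" to be in the future to time_after() alone,
 * which is why inode_dirtied_after() also checks the value against the
 * current jiffies on !CONFIG_64BIT.
 */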

#define EXPIRE_DIRTY_ATIME	0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find the proper writeback list for the inode depending on its current state
 * and possibly also change of its state while we were doing writeback.  Here
 * we handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can only be called by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_wb_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_wb_list_del_locked(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has
	 * a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back a single inode, e.g. on behalf
 * of a filesystem. The flusher thread instead uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
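
/*
 * Usage sketch (illustrative, not part of this file): a caller holding an
 * inode reference can force data-integrity writeback of one inode roughly
 * the way write_inode_now() does:
 *
 *	struct writeback_control wbc = {
 *		.nr_to_write	= LONG_MAX,
 *		.sync_mode	= WB_SYNC_ALL,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	writeback_single_inode(inode, inode_to_wb(inode), &wbc);
 */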

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
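
/*
 * Worked example (illustrative): with avg_write_bandwidth = 25600 pages/s,
 * global_dirty_limit / DIRTY_SCOPE = 20000 and work->nr_pages = 15000, the
 * WB_SYNC_NONE chunk is min(12800, 20000, 15000) = 12800 pages, then
 * round_down(12800 + 1024, 1024) = 13312 pages (taking MIN_WRITEBACK_PAGES
 * as 1024 for 4KB pages). The rounding keeps chunks a multiple of
 * MIN_WRITEBACK_PAGES and never below it.
 */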

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;		/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed: the
		 * first kind does not need periodic writeout yet, and for
		 * the latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}
Nick Piggin | 38f2197 | 2009-01-06 14:40:25 -0800 | [diff] [blame] | 864 | |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 865 | static long __writeback_inodes_wb(struct bdi_writeback *wb, |
| 866 | struct wb_writeback_work *work) |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 867 | { |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 868 | unsigned long start_time = jiffies; |
| 869 | long wrote = 0; |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 870 | |
| 871 | while (!list_empty(&wb->b_io)) { |
Nick Piggin | 7ccf19a | 2010-10-21 11:49:30 +1100 | [diff] [blame] | 872 | struct inode *inode = wb_inode(wb->b_io.prev); |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 873 | struct super_block *sb = inode->i_sb; |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 874 | |
Konstantin Khlebnikov | eb6ef3d | 2015-02-19 20:19:35 +0300 | [diff] [blame] | 875 | if (!trylock_super(sb)) { |
Wu Fengguang | 0e99581 | 2011-07-29 22:14:35 -0600 | [diff] [blame] | 876 | /* |
Konstantin Khlebnikov | eb6ef3d | 2015-02-19 20:19:35 +0300 | [diff] [blame] | 877 | * trylock_super() may fail consistently due to |
Wu Fengguang | 0e99581 | 2011-07-29 22:14:35 -0600 | [diff] [blame] | 878 | * s_umount being grabbed by someone else. Don't use |
| 879 | * requeue_io() to avoid busy retrying the inode/sb. |
| 880 | */ |
| 881 | redirty_tail(inode, wb); |
Christoph Hellwig | edadfb1 | 2010-06-10 12:07:54 +0200 | [diff] [blame] | 882 | continue; |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 883 | } |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 884 | wrote += writeback_sb_inodes(sb, wb, work); |
Konstantin Khlebnikov | eb6ef3d | 2015-02-19 20:19:35 +0300 | [diff] [blame] | 885 | up_read(&sb->s_umount); |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 886 | |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 887 | /* refer to the same tests at the end of writeback_sb_inodes */ |
| 888 | if (wrote) { |
| 889 | if (time_is_before_jiffies(start_time + HZ / 10UL)) |
| 890 | break; |
| 891 | if (work->nr_pages <= 0) |
| 892 | break; |
| 893 | } |
Edward Shishkin | f11c9c5 | 2010-03-11 14:09:47 -0800 | [diff] [blame] | 894 | } |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 895 | /* Leave any unwritten inodes on b_io */ |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 896 | return wrote; |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 897 | } |
| 898 | |
Wanpeng Li | 7d9f073 | 2013-09-11 14:22:40 -0700 | [diff] [blame] | 899 | static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 900 | enum wb_reason reason) |
Christoph Hellwig | edadfb1 | 2010-06-10 12:07:54 +0200 | [diff] [blame] | 901 | { |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 902 | struct wb_writeback_work work = { |
| 903 | .nr_pages = nr_pages, |
| 904 | .sync_mode = WB_SYNC_NONE, |
| 905 | .range_cyclic = 1, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 906 | .reason = reason, |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 907 | }; |
Christoph Hellwig | edadfb1 | 2010-06-10 12:07:54 +0200 | [diff] [blame] | 908 | |
Christoph Hellwig | f758eea | 2011-04-21 18:19:44 -0600 | [diff] [blame] | 909 | spin_lock(&wb->list_lock); |
Wu Fengguang | 424b351 | 2010-07-21 20:11:53 -0600 | [diff] [blame] | 910 | if (list_empty(&wb->b_io)) |
Curt Wohlgemuth | ad4e38d | 2011-10-07 21:51:56 -0600 | [diff] [blame] | 911 | queue_io(wb, &work); |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 912 | __writeback_inodes_wb(wb, &work); |
Christoph Hellwig | f758eea | 2011-04-21 18:19:44 -0600 | [diff] [blame] | 913 | spin_unlock(&wb->list_lock); |
Christoph Hellwig | edadfb1 | 2010-06-10 12:07:54 +0200 | [diff] [blame] | 914 | |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 915 | return nr_pages - work.nr_pages; |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 916 | } |
| 917 | |
Tejun Heo | a88a341 | 2015-05-22 17:13:28 -0400 | [diff] [blame] | 918 | static bool over_bground_thresh(struct bdi_writeback *wb) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 919 | { |
| 920 | unsigned long background_thresh, dirty_thresh; |
| 921 | |
Wu Fengguang | 16c4042 | 2010-08-11 14:17:39 -0700 | [diff] [blame] | 922 | global_dirty_limits(&background_thresh, &dirty_thresh); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 923 | |
Wu Fengguang | b00949a | 2010-11-18 14:38:33 -0600 | [diff] [blame] | 924 | if (global_page_state(NR_FILE_DIRTY) + |
| 925 | global_page_state(NR_UNSTABLE_NFS) > background_thresh) |
| 926 | return true; |
| 927 | |
Tejun Heo | a88a341 | 2015-05-22 17:13:28 -0400 | [diff] [blame] | 928 | if (wb_stat(wb, WB_RECLAIMABLE) > wb_dirty_limit(wb, background_thresh)) |
Wu Fengguang | b00949a | 2010-11-18 14:38:33 -0600 | [diff] [blame] | 929 | return true; |
| 930 | |
| 931 | return false; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 932 | } |
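To put rough numbers on the checks above: a minimal userspace sketch of the background-threshold arithmetic, assuming the default vm.dirty_background_ratio of 10 and an invented amount of dirtyable memory. (The real global_dirty_limits() in mm/page-writeback.c also honors vm.dirty_background_bytes and highmem accounting, which this ignores.)

#include <stdio.h>

int main(void)
{
	/* Both figures below are assumptions for illustration only. */
	unsigned long dirtyable_pages = 2000000UL;	/* ~7.6 GiB of 4 KiB pages */
	unsigned int dirty_background_ratio = 10;	/* default sysctl value */

	unsigned long background_thresh =
		dirtyable_pages * dirty_background_ratio / 100;

	/*
	 * over_bground_thresh() keeps background writeback running while
	 * NR_FILE_DIRTY + NR_UNSTABLE_NFS stays above this cutoff, or while
	 * this wb's reclaimable pages exceed its share of the threshold.
	 */
	printf("background_thresh = %lu pages\n", background_thresh);
	return 0;
}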
| 933 | |
| 934 | /* |
Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 935 | * Called under wb->list_lock. If there are multiple wbs per bdi, |
| 936 | * only the flusher working on the first wb should do it. |
| 937 | */ |
| 938 | static void wb_update_bandwidth(struct bdi_writeback *wb, |
| 939 | unsigned long start_time) |
| 940 | { |
Tejun Heo | a88a341 | 2015-05-22 17:13:28 -0400 | [diff] [blame] | 941 | __wb_update_bandwidth(wb, 0, 0, 0, 0, 0, start_time); |
Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 942 | } |
| 943 | |
| 944 | /* |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 945 | * Explicit flushing or periodic writeback of "old" data. |
| 946 | * |
| 947 | * Define "old": the first time one of an inode's pages is dirtied, we mark the |
| 948 | * dirtying-time in the inode's address_space. So this periodic writeback code |
| 949 | * just walks the superblock inode list, writing back any inodes which are |
| 950 | * older than a specific point in time. |
| 951 | * |
| 952 | * Try to run once per dirty_writeback_interval. But if a writeback event |
| 953 | * takes longer than one dirty_writeback_interval, then leave a |
| 954 | * one-second gap before the next run. |
| 955 | * |
| 956 | * older_than_this takes precedence over nr_to_write. So we'll only write back |
| 957 | * all dirty pages if they are all attached to "old" mappings. |
| 958 | */ |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 959 | static long wb_writeback(struct bdi_writeback *wb, |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 960 | struct wb_writeback_work *work) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 961 | { |
Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 962 | unsigned long wb_start = jiffies; |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 963 | long nr_pages = work->nr_pages; |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 964 | unsigned long oldest_jif; |
Jan Kara | a5989bd | 2009-09-16 19:22:48 +0200 | [diff] [blame] | 965 | struct inode *inode; |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 966 | long progress; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 967 | |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 968 | oldest_jif = jiffies; |
| 969 | work->older_than_this = &oldest_jif; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 970 | |
Wu Fengguang | e8dfc30 | 2011-04-21 12:06:32 -0600 | [diff] [blame] | 971 | spin_lock(&wb->list_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 972 | for (;;) { |
| 973 | /* |
Wu Fengguang | d3ddec7 | 2009-09-23 20:33:40 +0800 | [diff] [blame] | 974 | * Stop writeback when nr_pages has been consumed |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 975 | */ |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 976 | if (work->nr_pages <= 0) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 977 | break; |
| 978 | |
| 979 | /* |
Jan Kara | aa373cf | 2011-01-13 15:45:47 -0800 | [diff] [blame] | 980 | * Background writeout and kupdate-style writeback may |
| 981 | * run forever. Stop them if there is other work to do |
| 982 | * so that e.g. sync can proceed. They'll be restarted |
| 983 | * after the other work items are all done. |
| 984 | */ |
| 985 | if ((work->for_background || work->for_kupdate) && |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 986 | !list_empty(&wb->work_list)) |
Jan Kara | aa373cf | 2011-01-13 15:45:47 -0800 | [diff] [blame] | 987 | break; |
| 988 | |
| 989 | /* |
Wu Fengguang | d3ddec7 | 2009-09-23 20:33:40 +0800 | [diff] [blame] | 990 | * For background writeout, stop when we are below the |
| 991 | * background dirty threshold |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 992 | */ |
Tejun Heo | a88a341 | 2015-05-22 17:13:28 -0400 | [diff] [blame] | 993 | if (work->for_background && !over_bground_thresh(wb)) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 994 | break; |
| 995 | |
Jan Kara | 1bc36b6 | 2011-10-19 11:44:41 +0200 | [diff] [blame] | 996 | /* |
| 997 | * Kupdate and background work items are special: we want to |
| 998 | * include all inodes that need writing. Livelock avoidance is |
| 999 | * handled by these work items yielding to any other work, so |
| 1000 | * we are safe. |
| 1001 | */ |
Wu Fengguang | ba9aa83 | 2010-07-21 20:32:30 -0600 | [diff] [blame] | 1002 | if (work->for_kupdate) { |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 1003 | oldest_jif = jiffies - |
Wu Fengguang | ba9aa83 | 2010-07-21 20:32:30 -0600 | [diff] [blame] | 1004 | msecs_to_jiffies(dirty_expire_interval * 10); |
Jan Kara | 1bc36b6 | 2011-10-19 11:44:41 +0200 | [diff] [blame] | 1005 | } else if (work->for_background) |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 1006 | oldest_jif = jiffies; |
Dave Chinner | 028c2dd | 2010-07-07 13:24:07 +1000 | [diff] [blame] | 1007 | |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1008 | trace_writeback_start(wb->bdi, work); |
Wu Fengguang | e8dfc30 | 2011-04-21 12:06:32 -0600 | [diff] [blame] | 1009 | if (list_empty(&wb->b_io)) |
Curt Wohlgemuth | ad4e38d | 2011-10-07 21:51:56 -0600 | [diff] [blame] | 1010 | queue_io(wb, work); |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1011 | if (work->sb) |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1012 | progress = writeback_sb_inodes(work->sb, wb, work); |
Christoph Hellwig | edadfb1 | 2010-06-10 12:07:54 +0200 | [diff] [blame] | 1013 | else |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1014 | progress = __writeback_inodes_wb(wb, work); |
| 1015 | trace_writeback_written(wb->bdi, work); |
Dave Chinner | 028c2dd | 2010-07-07 13:24:07 +1000 | [diff] [blame] | 1016 | |
Wu Fengguang | e98be2d | 2010-08-29 11:22:30 -0600 | [diff] [blame] | 1017 | wb_update_bandwidth(wb, wb_start); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1018 | |
| 1019 | /* |
Jens Axboe | 71fd05a | 2009-09-23 19:32:26 +0200 | [diff] [blame] | 1020 | * Did we write something? Try for more |
Wu Fengguang | e6fb6da | 2010-07-22 10:23:44 -0600 | [diff] [blame] | 1021 | * |
| 1022 | * Dirty inodes are moved to b_io for writeback in batches. |
| 1023 | * The completion of the current batch does not necessarily |
| 1024 | * mean the overall work is done. So we keep looping as long |
| 1025 | * as we have made some progress on cleaning pages or inodes. |
Jens Axboe | 71fd05a | 2009-09-23 19:32:26 +0200 | [diff] [blame] | 1026 | */ |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1027 | if (progress) |
Jens Axboe | 71fd05a | 2009-09-23 19:32:26 +0200 | [diff] [blame] | 1028 | continue; |
| 1029 | /* |
Wu Fengguang | e6fb6da | 2010-07-22 10:23:44 -0600 | [diff] [blame] | 1030 | * No more inodes for IO, bail |
Jens Axboe | 71fd05a | 2009-09-23 19:32:26 +0200 | [diff] [blame] | 1031 | */ |
Wu Fengguang | b7a2441 | 2010-07-21 22:19:51 -0600 | [diff] [blame] | 1032 | if (list_empty(&wb->b_more_io)) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1033 | break; |
| 1034 | /* |
Jens Axboe | 8010c3b | 2009-09-15 20:04:57 +0200 | [diff] [blame] | 1035 | * Nothing written. Wait for some inode to |
| 1036 | * become available for writeback. Otherwise |
| 1037 | * we'll just busyloop. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1038 | */ |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1039 | if (!list_empty(&wb->b_more_io)) { |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1040 | trace_writeback_wait(wb->bdi, work); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1041 | inode = wb_inode(wb->b_more_io.prev); |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1042 | spin_lock(&inode->i_lock); |
Jan Kara | f0d07b7 | 2012-05-03 14:47:59 +0200 | [diff] [blame] | 1043 | spin_unlock(&wb->list_lock); |
Jan Kara | 169ebd9 | 2012-05-03 14:48:03 +0200 | [diff] [blame] | 1044 | /* This function drops i_lock... */ |
| 1045 | inode_sleep_on_writeback(inode); |
Jan Kara | f0d07b7 | 2012-05-03 14:47:59 +0200 | [diff] [blame] | 1046 | spin_lock(&wb->list_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1047 | } |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1048 | } |
Wu Fengguang | e8dfc30 | 2011-04-21 12:06:32 -0600 | [diff] [blame] | 1049 | spin_unlock(&wb->list_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1050 | |
Wu Fengguang | d46db3d | 2011-05-04 19:54:37 -0600 | [diff] [blame] | 1051 | return nr_pages - work->nr_pages; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1052 | } |
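One detail of the loop above that is easy to misread: dirty_expire_interval is stored in centiseconds (default 3000, i.e. 30 seconds), hence the `* 10` to reach milliseconds before msecs_to_jiffies(). A self-contained sketch of the kupdate cutoff test, with msecs_to_jiffies() approximated for an assumed HZ of 100:

#include <stdio.h>
#include <stdbool.h>

#define HZ 100	/* assumed tick rate for this sketch */

static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return (unsigned long)ms * HZ / 1000;
}

int main(void)
{
	unsigned long jiffies = 500000;			/* pretend current time */
	unsigned int dirty_expire_interval = 3000;	/* centisecs, 30 s default */

	unsigned long oldest_jif = jiffies -
		msecs_to_jiffies_sketch(dirty_expire_interval * 10);

	/* An inode dirtied before this cutoff counts as "old". */
	unsigned long dirtied_when = 496000;
	bool old = (long)(dirtied_when - oldest_jif) <= 0; /* time_before_eq() idiom */

	printf("cutoff=%lu dirtied_when=%lu -> %s\n", oldest_jif, dirtied_when,
	       old ? "write back" : "not old yet");
	return 0;
}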
| 1053 | |
| 1054 | /* |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1055 | * Return the next wb_writeback_work struct that hasn't been processed yet. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1056 | */ |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1057 | static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1058 | { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1059 | struct wb_writeback_work *work = NULL; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1060 | |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1061 | spin_lock_bh(&wb->work_lock); |
| 1062 | if (!list_empty(&wb->work_list)) { |
| 1063 | work = list_entry(wb->work_list.next, |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1064 | struct wb_writeback_work, list); |
| 1065 | list_del_init(&work->list); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1066 | } |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1067 | spin_unlock_bh(&wb->work_lock); |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1068 | return work; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1069 | } |
| 1070 | |
Linus Torvalds | cdf01dd | 2010-10-30 08:55:52 -0700 | [diff] [blame] | 1071 | /* |
| 1072 | * Add in the number of potentially dirty inodes, because each inode |
| 1073 | * write can dirty pagecache in the underlying blockdev. |
| 1074 | */ |
| 1075 | static unsigned long get_nr_dirty_pages(void) |
| 1076 | { |
| 1077 | return global_page_state(NR_FILE_DIRTY) + |
| 1078 | global_page_state(NR_UNSTABLE_NFS) + |
| 1079 | get_nr_dirty_inodes(); |
| 1080 | } |
| 1081 | |
Jan Kara | 6585027 | 2011-01-13 15:45:44 -0800 | [diff] [blame] | 1082 | static long wb_check_background_flush(struct bdi_writeback *wb) |
| 1083 | { |
Tejun Heo | a88a341 | 2015-05-22 17:13:28 -0400 | [diff] [blame] | 1084 | if (over_bground_thresh(wb)) { |
Jan Kara | 6585027 | 2011-01-13 15:45:44 -0800 | [diff] [blame] | 1085 | |
| 1086 | struct wb_writeback_work work = { |
| 1087 | .nr_pages = LONG_MAX, |
| 1088 | .sync_mode = WB_SYNC_NONE, |
| 1089 | .for_background = 1, |
| 1090 | .range_cyclic = 1, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1091 | .reason = WB_REASON_BACKGROUND, |
Jan Kara | 6585027 | 2011-01-13 15:45:44 -0800 | [diff] [blame] | 1092 | }; |
| 1093 | |
| 1094 | return wb_writeback(wb, &work); |
| 1095 | } |
| 1096 | |
| 1097 | return 0; |
| 1098 | } |
| 1099 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1100 | static long wb_check_old_data_flush(struct bdi_writeback *wb) |
| 1101 | { |
| 1102 | unsigned long expired; |
| 1103 | long nr_pages; |
| 1104 | |
Jens Axboe | 69b62d0 | 2010-05-17 12:51:03 +0200 | [diff] [blame] | 1105 | /* |
| 1106 | * When dirty_writeback_interval is zero, periodic writeback is disabled |
| 1107 | */ |
| 1108 | if (!dirty_writeback_interval) |
| 1109 | return 0; |
| 1110 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1111 | expired = wb->last_old_flush + |
| 1112 | msecs_to_jiffies(dirty_writeback_interval * 10); |
| 1113 | if (time_before(jiffies, expired)) |
| 1114 | return 0; |
| 1115 | |
| 1116 | wb->last_old_flush = jiffies; |
Linus Torvalds | cdf01dd | 2010-10-30 08:55:52 -0700 | [diff] [blame] | 1117 | nr_pages = get_nr_dirty_pages(); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1118 | |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 1119 | if (nr_pages) { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1120 | struct wb_writeback_work work = { |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 1121 | .nr_pages = nr_pages, |
| 1122 | .sync_mode = WB_SYNC_NONE, |
| 1123 | .for_kupdate = 1, |
| 1124 | .range_cyclic = 1, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1125 | .reason = WB_REASON_PERIODIC, |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 1126 | }; |
| 1127 | |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1128 | return wb_writeback(wb, &work); |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 1129 | } |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1130 | |
| 1131 | return 0; |
| 1132 | } |
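The scheduling test above is the standard jiffies rate-limit idiom: remember when we last ran and compare against the deadline with a wraparound-safe signed subtraction. dirty_writeback_interval is, like dirty_expire_interval, stored in centiseconds (default 500, i.e. 5 seconds). A self-contained sketch, with time_before() reproduced from its <linux/jiffies.h> definition:

#include <stdio.h>
#include <stdbool.h>

#define time_before(a, b) ((long)((b) - (a)) > 0)	/* as in <linux/jiffies.h> */

static unsigned long last_old_flush;	/* jiffies of the previous periodic flush */

static bool periodic_flush_due(unsigned long now, unsigned long interval)
{
	unsigned long expired = last_old_flush + interval;

	if (time_before(now, expired))	/* deadline not reached yet */
		return false;
	last_old_flush = now;
	return true;
}

int main(void)
{
	last_old_flush = 1000;
	/* With HZ=100, 500 centisecs is roughly 500 jiffies. */
	printf("%d\n", periodic_flush_due(1400, 500));	/* 0: too early */
	printf("%d\n", periodic_flush_due(1600, 500));	/* 1: interval elapsed */
	return 0;
}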
| 1133 | |
| 1134 | /* |
| 1135 | * Retrieve work items and do the writeback they describe |
| 1136 | */ |
Wanpeng Li | 25d130b | 2013-07-08 16:00:14 -0700 | [diff] [blame] | 1137 | static long wb_do_writeback(struct bdi_writeback *wb) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1138 | { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1139 | struct wb_writeback_work *work; |
Jens Axboe | c4a77a6 | 2009-09-16 15:18:25 +0200 | [diff] [blame] | 1140 | long wrote = 0; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1141 | |
Tejun Heo | 4452226 | 2015-05-22 17:13:26 -0400 | [diff] [blame] | 1142 | set_bit(WB_writeback_running, &wb->state); |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1143 | while ((work = get_next_work_item(wb)) != NULL) { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1144 | |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1145 | trace_writeback_exec(wb->bdi, work); |
Dave Chinner | 455b286 | 2010-07-07 13:24:06 +1000 | [diff] [blame] | 1146 | |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1147 | wrote += wb_writeback(wb, work); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1148 | |
| 1149 | /* |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1150 | * Notify the caller of completion if this is a synchronous |
| 1151 | * work item, otherwise just free it. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1152 | */ |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1153 | if (work->done) |
| 1154 | complete(work->done); |
| 1155 | else |
| 1156 | kfree(work); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1157 | } |
| 1158 | |
| 1159 | /* |
| 1160 | * Check for periodic writeback, kupdated() style |
| 1161 | */ |
| 1162 | wrote += wb_check_old_data_flush(wb); |
Jan Kara | 6585027 | 2011-01-13 15:45:44 -0800 | [diff] [blame] | 1163 | wrote += wb_check_background_flush(wb); |
Tejun Heo | 4452226 | 2015-05-22 17:13:26 -0400 | [diff] [blame] | 1164 | clear_bit(WB_writeback_running, &wb->state); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1165 | |
| 1166 | return wrote; |
| 1167 | } |
| 1168 | |
| 1169 | /* |
| 1170 | * Handle writeback of dirty data for the device backed by this bdi. Also |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1171 | * reschedules periodically and does kupdated-style flushing. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1172 | */ |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1173 | void wb_workfn(struct work_struct *work) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1174 | { |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1175 | struct bdi_writeback *wb = container_of(to_delayed_work(work), |
| 1176 | struct bdi_writeback, dwork); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1177 | long pages_written; |
| 1178 | |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1179 | set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); |
Peter Zijlstra | 766f916 | 2010-10-26 14:22:45 -0700 | [diff] [blame] | 1180 | current->flags |= PF_SWAPWRITE; |
Christoph Hellwig | 0824390 | 2010-06-19 23:08:22 +0200 | [diff] [blame] | 1181 | |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1182 | if (likely(!current_is_workqueue_rescuer() || |
Tejun Heo | 4452226 | 2015-05-22 17:13:26 -0400 | [diff] [blame] | 1183 | !test_bit(WB_registered, &wb->state))) { |
Artem Bityutskiy | 6467716 | 2010-07-25 14:29:22 +0300 | [diff] [blame] | 1184 | /* |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1185 | * The normal path. Keep writing back @wb until its |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1186 | * work_list is empty. Note that this path is also taken |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1187 | * if @wb is shutting down, even when we're running off the |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1188 | * rescuer, as work_list needs to be drained. |
Artem Bityutskiy | 6467716 | 2010-07-25 14:29:22 +0300 | [diff] [blame] | 1189 | */ |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1190 | do { |
Wanpeng Li | 25d130b | 2013-07-08 16:00:14 -0700 | [diff] [blame] | 1191 | pages_written = wb_do_writeback(wb); |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1192 | trace_writeback_pages_written(pages_written); |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1193 | } while (!list_empty(&wb->work_list)); |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1194 | } else { |
| 1195 | /* |
| 1196 | * bdi_wq can't get enough workers and we're running off |
| 1197 | * the emergency worker. Don't hog it. Hopefully, 1024 is |
| 1198 | * enough for efficient IO. |
| 1199 | */ |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1200 | pages_written = writeback_inodes_wb(wb, 1024, |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1201 | WB_REASON_FORKER_THREAD); |
Dave Chinner | 455b286 | 2010-07-07 13:24:06 +1000 | [diff] [blame] | 1202 | trace_writeback_pages_written(pages_written); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1203 | } |
| 1204 | |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1205 | if (!list_empty(&wb->work_list)) |
Derek Basehore | 6ca738d | 2014-04-03 14:46:22 -0700 | [diff] [blame] | 1206 | mod_delayed_work(bdi_wq, &wb->dwork, 0); |
| 1207 | else if (wb_has_dirty_io(wb) && dirty_writeback_interval) |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1208 | wb_wakeup_delayed(wb); |
Dave Chinner | 455b286 | 2010-07-07 13:24:06 +1000 | [diff] [blame] | 1209 | |
Tejun Heo | 839a8e8 | 2013-04-01 19:08:06 -0700 | [diff] [blame] | 1210 | current->flags &= ~PF_SWAPWRITE; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1211 | } |
| 1212 | |
| 1213 | /* |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1214 | * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back |
| 1215 | * the whole world. |
| 1216 | */ |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1217 | void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1218 | { |
Christoph Hellwig | b8c2f34 | 2010-06-08 18:15:07 +0200 | [diff] [blame] | 1219 | struct backing_dev_info *bdi; |
Christoph Hellwig | b8c2f34 | 2010-06-08 18:15:07 +0200 | [diff] [blame] | 1220 | |
Jan Kara | 47df3dd | 2013-09-11 14:22:22 -0700 | [diff] [blame] | 1221 | if (!nr_pages) |
| 1222 | nr_pages = get_nr_dirty_pages(); |
Christoph Hellwig | b8c2f34 | 2010-06-08 18:15:07 +0200 | [diff] [blame] | 1223 | |
| 1224 | rcu_read_lock(); |
Tejun Heo | f2b6512 | 2015-05-22 17:13:55 -0400 | [diff] [blame] | 1225 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { |
| 1226 | struct bdi_writeback *wb; |
| 1227 | struct wb_iter iter; |
| 1228 | |
| 1229 | if (!bdi_has_dirty_io(bdi)) |
| 1230 | continue; |
| 1231 | |
| 1232 | bdi_for_each_wb(wb, bdi, &iter, 0) |
| 1233 | wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), |
| 1234 | false, reason); |
| 1235 | } |
Christoph Hellwig | b8c2f34 | 2010-06-08 18:15:07 +0200 | [diff] [blame] | 1236 | rcu_read_unlock(); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1237 | } |
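For context, a typical caller passes nr_pages == 0 so that everything dirty gets flushed. A hedged sketch of how the sync(2) entry point in fs/sync.c of this era used it; heavily abridged, since the real function also syncs the filesystems themselves and flushes and waits on block devices:

SYSCALL_DEFINE0(sync)
{
	wakeup_flusher_threads(0, WB_REASON_SYNC);	/* kick async writeback */
	iterate_supers(sync_inodes_one_sb, NULL);	/* then the waiting pass */
	return 0;
}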
| 1238 | |
Theodore Ts'o | a2f4870 | 2015-03-17 12:23:19 -0400 | [diff] [blame] | 1239 | /* |
| 1240 | * Wake up bdis periodically to make sure dirtytime inodes get |
| 1241 | * written back. We deliberately do *not* check the |
| 1242 | * b_dirtytime list in wb_has_dirty_io(), since this would cause the |
| 1243 | * kernel to be constantly waking up once there are any dirtytime |
| 1244 | * inodes on the system. So instead we define a separate delayed work |
| 1245 | * function which gets called much more rarely. (By default, only |
| 1246 | * once every 12 hours.) |
| 1247 | * |
| 1248 | * If there is any other write activity going on in the file system, |
| 1249 | * this function won't be necessary. But if the only thing that has |
| 1250 | * happened on the file system is a dirtytime inode caused by an atime |
| 1251 | * update, we need this infrastructure below to make sure that inode |
| 1252 | * eventually gets pushed out to disk. |
| 1253 | */ |
| 1254 | static void wakeup_dirtytime_writeback(struct work_struct *w); |
| 1255 | static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); |
| 1256 | |
| 1257 | static void wakeup_dirtytime_writeback(struct work_struct *w) |
| 1258 | { |
| 1259 | struct backing_dev_info *bdi; |
| 1260 | |
| 1261 | rcu_read_lock(); |
| 1262 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { |
Tejun Heo | 001fe6f | 2015-05-22 17:13:56 -0400 | [diff] [blame^] | 1263 | struct bdi_writeback *wb; |
| 1264 | struct wb_iter iter; |
| 1265 | |
| 1266 | bdi_for_each_wb(wb, bdi, &iter, 0) |
| 1267 | if (!list_empty(&wb->b_dirty_time)) |
| 1268 | wb_wakeup(wb); |
Theodore Ts'o | a2f4870 | 2015-03-17 12:23:19 -0400 | [diff] [blame] | 1269 | } |
| 1270 | rcu_read_unlock(); |
| 1271 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); |
| 1272 | } |
| 1273 | |
| 1274 | static int __init start_dirtytime_writeback(void) |
| 1275 | { |
| 1276 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); |
| 1277 | return 0; |
| 1278 | } |
| 1279 | __initcall(start_dirtytime_writeback); |
| 1280 | |
Theodore Ts'o | 1efff91 | 2015-03-17 12:23:32 -0400 | [diff] [blame] | 1281 | int dirtytime_interval_handler(struct ctl_table *table, int write, |
| 1282 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 1283 | { |
| 1284 | int ret; |
| 1285 | |
| 1286 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| 1287 | if (ret == 0 && write) |
| 1288 | mod_delayed_work(system_wq, &dirtytime_work, 0); |
| 1289 | return ret; |
| 1290 | } |
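A sketch of how a handler like this is wired up as a sysctl, modeled on the dirtytime_expire_seconds entry in kernel/sysctl.c; the table name is invented here and `zero` stands in for the static minimum-value int that file keeps:

static int zero;	/* lower bound for the sysctl value (assumption) */

static struct ctl_table dirtytime_ctl_sketch[] = {
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
		.extra1		= &zero,
	},
	{ }
};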
| 1291 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1292 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) |
| 1293 | { |
| 1294 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { |
| 1295 | struct dentry *dentry; |
| 1296 | const char *name = "?"; |
| 1297 | |
| 1298 | dentry = d_find_alias(inode); |
| 1299 | if (dentry) { |
| 1300 | spin_lock(&dentry->d_lock); |
| 1301 | name = (const char *) dentry->d_name.name; |
| 1302 | } |
| 1303 | printk(KERN_DEBUG |
| 1304 | "%s(%d): dirtied inode %lu (%s) on %s\n", |
| 1305 | current->comm, task_pid_nr(current), inode->i_ino, |
| 1306 | name, inode->i_sb->s_id); |
| 1307 | if (dentry) { |
| 1308 | spin_unlock(&dentry->d_lock); |
| 1309 | dput(dentry); |
| 1310 | } |
| 1311 | } |
| 1312 | } |
| 1313 | |
| 1314 | /** |
| 1315 | * __mark_inode_dirty - internal function |
| 1316 | * @inode: inode to mark |
| 1317 | * @flags: what kind of dirty (e.g. I_DIRTY_SYNC) |
| 1318 | * Mark an inode as dirty. Callers should use mark_inode_dirty or |
| 1319 | * mark_inode_dirty_sync. |
| 1320 | * |
| 1321 | * Put the inode on the super block's dirty list. |
| 1322 | * |
| 1323 | * CAREFUL! We mark it dirty unconditionally, but move it onto the |
| 1324 | * dirty list only if it is hashed or if it refers to a blockdev. |
| 1325 | * If it was not hashed, it will never be added to the dirty list |
| 1326 | * even if it is later hashed, as it will have been marked dirty already. |
| 1327 | * |
| 1328 | * In short, make sure you hash any inodes _before_ you start marking |
| 1329 | * them dirty. |
| 1330 | * |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1331 | * Note that for blockdevs, inode->dirtied_when represents the dirtying time of |
| 1332 | * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of |
| 1333 | * the kernel-internal blockdev inode represents the dirtying time of the |
| 1334 | * blockdev's pages. This is why for I_DIRTY_PAGES we always use |
| 1335 | * page->mapping->host, so the page-dirtying time is recorded in the internal |
| 1336 | * blockdev inode. |
| 1337 | */ |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1338 | #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1339 | void __mark_inode_dirty(struct inode *inode, int flags) |
| 1340 | { |
| 1341 | struct super_block *sb = inode->i_sb; |
Artem Bityutskiy | 253c34e | 2010-07-25 14:29:21 +0300 | [diff] [blame] | 1342 | struct backing_dev_info *bdi = NULL; |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1343 | int dirtytime; |
| 1344 | |
| 1345 | trace_writeback_mark_inode_dirty(inode, flags); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1346 | |
| 1347 | /* |
| 1348 | * Don't do this for I_DIRTY_PAGES - that doesn't actually |
| 1349 | * dirty the inode itself |
| 1350 | */ |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1351 | if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) { |
Tejun Heo | 9fb0a7d | 2013-01-11 13:06:37 -0800 | [diff] [blame] | 1352 | trace_writeback_dirty_inode_start(inode, flags); |
| 1353 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1354 | if (sb->s_op->dirty_inode) |
Christoph Hellwig | aa38572 | 2011-05-27 06:53:02 -0400 | [diff] [blame] | 1355 | sb->s_op->dirty_inode(inode, flags); |
Tejun Heo | 9fb0a7d | 2013-01-11 13:06:37 -0800 | [diff] [blame] | 1356 | |
| 1357 | trace_writeback_dirty_inode(inode, flags); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1358 | } |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1359 | if (flags & I_DIRTY_INODE) |
| 1360 | flags &= ~I_DIRTY_TIME; |
| 1361 | dirtytime = flags & I_DIRTY_TIME; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1362 | |
| 1363 | /* |
Tejun Heo | 9c6ac78 | 2014-10-24 15:38:21 -0400 | [diff] [blame] | 1364 | * Paired with smp_mb() in __writeback_single_inode() for the |
| 1365 | * following lockless i_state test. See there for details. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1366 | */ |
| 1367 | smp_mb(); |
| 1368 | |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1369 | if (((inode->i_state & flags) == flags) || |
| 1370 | (dirtytime && (inode->i_state & I_DIRTY_INODE))) |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1371 | return; |
| 1372 | |
| 1373 | if (unlikely(block_dump)) |
| 1374 | block_dump___mark_inode_dirty(inode); |
| 1375 | |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1376 | spin_lock(&inode->i_lock); |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1377 | if (dirtytime && (inode->i_state & I_DIRTY_INODE)) |
| 1378 | goto out_unlock_inode; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1379 | if ((inode->i_state & flags) != flags) { |
| 1380 | const int was_dirty = inode->i_state & I_DIRTY; |
| 1381 | |
Tejun Heo | 52ebea7 | 2015-05-22 17:13:37 -0400 | [diff] [blame] | 1382 | inode_attach_wb(inode, NULL); |
| 1383 | |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1384 | if (flags & I_DIRTY_INODE) |
| 1385 | inode->i_state &= ~I_DIRTY_TIME; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1386 | inode->i_state |= flags; |
| 1387 | |
| 1388 | /* |
| 1389 | * If the inode is being synced, just update its dirty state. |
| 1390 | * The unlocker will place the inode on the appropriate |
| 1391 | * superblock list, based upon its state. |
| 1392 | */ |
| 1393 | if (inode->i_state & I_SYNC) |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1394 | goto out_unlock_inode; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1395 | |
| 1396 | /* |
| 1397 | * Only add valid (hashed) inodes to the superblock's |
| 1398 | * dirty list. Add blockdev inodes as well. |
| 1399 | */ |
| 1400 | if (!S_ISBLK(inode->i_mode)) { |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 1401 | if (inode_unhashed(inode)) |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1402 | goto out_unlock_inode; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1403 | } |
Al Viro | a4ffdde | 2010-06-02 17:38:30 -0400 | [diff] [blame] | 1404 | if (inode->i_state & I_FREEING) |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1405 | goto out_unlock_inode; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1406 | |
| 1407 | /* |
| 1408 | * If the inode was already on b_dirty/b_io/b_more_io, don't |
| 1409 | * reposition it (that would break b_dirty time-ordering). |
| 1410 | */ |
| 1411 | if (!was_dirty) { |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1412 | struct list_head *dirty_list; |
Dave Chinner | a66979a | 2011-03-22 22:23:41 +1100 | [diff] [blame] | 1413 | bool wakeup_bdi = false; |
Artem Bityutskiy | 253c34e | 2010-07-25 14:29:21 +0300 | [diff] [blame] | 1414 | bdi = inode_to_bdi(inode); |
Jens Axboe | 500b067 | 2009-09-09 09:10:25 +0200 | [diff] [blame] | 1415 | |
Junxiao Bi | 146d700 | 2013-09-11 14:23:04 -0700 | [diff] [blame] | 1416 | spin_unlock(&inode->i_lock); |
| 1417 | spin_lock(&bdi->wb.list_lock); |
Artem Bityutskiy | 253c34e | 2010-07-25 14:29:21 +0300 | [diff] [blame] | 1418 | |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1419 | WARN(bdi_cap_writeback_dirty(bdi) && |
| 1420 | !test_bit(WB_registered, &bdi->wb.state), |
| 1421 | "bdi-%s not registered\n", bdi->name); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1422 | |
| 1423 | inode->dirtied_when = jiffies; |
Theodore Ts'o | a2f4870 | 2015-03-17 12:23:19 -0400 | [diff] [blame] | 1424 | if (dirtytime) |
| 1425 | inode->dirtied_time_when = jiffies; |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1426 | |
Theodore Ts'o | a2f4870 | 2015-03-17 12:23:19 -0400 | [diff] [blame] | 1427 | if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES)) |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1428 | dirty_list = &bdi->wb.b_dirty; |
Theodore Ts'o | a2f4870 | 2015-03-17 12:23:19 -0400 | [diff] [blame] | 1429 | else |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1430 | dirty_list = &bdi->wb.b_dirty_time; |
| 1431 | |
| 1432 | wakeup_bdi = inode_wb_list_move_locked(inode, &bdi->wb, |
| 1433 | dirty_list); |
| 1434 | |
Christoph Hellwig | f758eea | 2011-04-21 18:19:44 -0600 | [diff] [blame] | 1435 | spin_unlock(&bdi->wb.list_lock); |
Theodore Ts'o | 0ae45f6 | 2015-02-02 00:37:00 -0500 | [diff] [blame] | 1436 | trace_writeback_dirty_inode_enqueue(inode); |
Dave Chinner | a66979a | 2011-03-22 22:23:41 +1100 | [diff] [blame] | 1437 | |
Tejun Heo | d6c10f1 | 2015-05-22 17:13:45 -0400 | [diff] [blame] | 1438 | /* |
| 1439 | * If this is the first dirty inode for this bdi, |
| 1440 | * we have to wake up the corresponding bdi thread |
| 1441 | * to make sure background writeback happens |
| 1442 | * later. |
| 1443 | */ |
| 1444 | if (bdi_cap_writeback_dirty(bdi) && wakeup_bdi) |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 1445 | wb_wakeup_delayed(&bdi->wb); |
Dave Chinner | a66979a | 2011-03-22 22:23:41 +1100 | [diff] [blame] | 1446 | return; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1447 | } |
| 1448 | } |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1449 | out_unlock_inode: |
| 1450 | spin_unlock(&inode->i_lock); |
Artem Bityutskiy | 253c34e | 2010-07-25 14:29:21 +0300 | [diff] [blame] | 1451 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1452 | } |
| 1453 | EXPORT_SYMBOL(__mark_inode_dirty); |
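Filesystems rarely call __mark_inode_dirty() directly; the mark_inode_dirty() and mark_inode_dirty_sync() wrappers in <linux/fs.h> supply the flags. A hedged sketch of the usual caller pattern; example_touch_mtime() is invented for illustration:

static void example_touch_mtime(struct inode *inode)
{
	inode->i_mtime = CURRENT_TIME;	/* update the in-core inode first */
	mark_inode_dirty(inode);	/* expands to
					 * __mark_inode_dirty(inode, I_DIRTY) */
}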
| 1454 | |
Jens Axboe | b6e5131 | 2009-09-16 15:13:54 +0200 | [diff] [blame] | 1455 | static void wait_sb_inodes(struct super_block *sb) |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 1456 | { |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1457 | struct inode *inode, *old_inode = NULL; |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 1458 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1459 | /* |
| 1460 | * We need to be protected against the filesystem going from |
| 1461 | * r/o to r/w or vice versa. |
| 1462 | */ |
Jens Axboe | b6e5131 | 2009-09-16 15:13:54 +0200 | [diff] [blame] | 1463 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 1464 | |
Dave Chinner | 55fa609 | 2011-03-22 22:23:40 +1100 | [diff] [blame] | 1465 | spin_lock(&inode_sb_list_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1466 | |
| 1467 | /* |
| 1468 | * Data integrity sync. Must wait for all pages under writeback, |
| 1469 | * because there may have been pages dirtied before our sync |
| 1470 | * call, but which had writeout started before we write it out. |
| 1471 | * In which case, the inode may not be on the dirty list, but |
| 1472 | * we still have to wait for that writeout. |
| 1473 | */ |
Jens Axboe | b6e5131 | 2009-09-16 15:13:54 +0200 | [diff] [blame] | 1474 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1475 | struct address_space *mapping = inode->i_mapping; |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1476 | |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1477 | spin_lock(&inode->i_lock); |
| 1478 | if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || |
| 1479 | (mapping->nrpages == 0)) { |
| 1480 | spin_unlock(&inode->i_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1481 | continue; |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1482 | } |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1483 | __iget(inode); |
Dave Chinner | 250df6e | 2011-03-22 22:23:36 +1100 | [diff] [blame] | 1484 | spin_unlock(&inode->i_lock); |
Dave Chinner | 55fa609 | 2011-03-22 22:23:40 +1100 | [diff] [blame] | 1485 | spin_unlock(&inode_sb_list_lock); |
| 1486 | |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1487 | /* |
Dave Chinner | 55fa609 | 2011-03-22 22:23:40 +1100 | [diff] [blame] | 1488 | * We hold a reference to 'inode' so it couldn't have been |
| 1489 | * removed from s_inodes list while we dropped the |
| 1490 | * inode_sb_list_lock. We cannot iput the inode now as we can |
| 1491 | * be holding the last reference and we cannot iput it under |
| 1492 | * inode_sb_list_lock. So we keep the reference and iput it |
| 1493 | * later. |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1494 | */ |
| 1495 | iput(old_inode); |
| 1496 | old_inode = inode; |
| 1497 | |
| 1498 | filemap_fdatawait(mapping); |
| 1499 | |
| 1500 | cond_resched(); |
Nick Piggin | 38f2197 | 2009-01-06 14:40:25 -0800 | [diff] [blame] | 1501 | |
Dave Chinner | 55fa609 | 2011-03-22 22:23:40 +1100 | [diff] [blame] | 1502 | spin_lock(&inode_sb_list_lock); |
Jens Axboe | 66f3b8e | 2009-09-02 09:19:46 +0200 | [diff] [blame] | 1503 | } |
Dave Chinner | 55fa609 | 2011-03-22 22:23:40 +1100 | [diff] [blame] | 1504 | spin_unlock(&inode_sb_list_lock); |
Jens Axboe | 03ba378 | 2009-09-09 09:08:54 +0200 | [diff] [blame] | 1505 | iput(old_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | } |
| 1507 | |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1508 | /** |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1509 | * writeback_inodes_sb_nr - writeback dirty inodes from given super_block |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1510 | * @sb: the superblock |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1511 | * @nr: the number of pages to write |
Marcos Paulo de Souza | 786228a | 2011-11-23 20:56:45 +0800 | [diff] [blame] | 1512 | * @reason: reason why some writeback work was initiated |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | * |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1514 | * Start writeback on some inodes on this super_block. No guarantees are made |
| 1515 | * on how many (if any) will be written, and this function does not wait |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1516 | * for the submitted IO to complete. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | */ |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1518 | void writeback_inodes_sb_nr(struct super_block *sb, |
| 1519 | unsigned long nr, |
| 1520 | enum wb_reason reason) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1522 | DECLARE_COMPLETION_ONSTACK(done); |
| 1523 | struct wb_writeback_work work = { |
Wu Fengguang | 6e6938b | 2010-06-06 10:38:15 -0600 | [diff] [blame] | 1524 | .sb = sb, |
| 1525 | .sync_mode = WB_SYNC_NONE, |
| 1526 | .tagged_writepages = 1, |
| 1527 | .done = &done, |
| 1528 | .nr_pages = nr, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1529 | .reason = reason, |
Christoph Hellwig | 3c4d716 | 2010-06-08 18:14:43 +0200 | [diff] [blame] | 1530 | }; |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1531 | struct backing_dev_info *bdi = sb->s_bdi; |
Jens Axboe | 0e3c9a2 | 2010-06-01 11:08:43 +0200 | [diff] [blame] | 1532 | |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1533 | if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info) |
Jan Kara | 6eedc70 | 2012-07-03 16:45:27 +0200 | [diff] [blame] | 1534 | return; |
Christoph Hellwig | cf37e97 | 2010-06-08 18:14:51 +0200 | [diff] [blame] | 1535 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1536 | wb_queue_work(&bdi->wb, &work); |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1537 | wait_for_completion(&done); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | } |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1539 | EXPORT_SYMBOL(writeback_inodes_sb_nr); |
| 1540 | |
| 1541 | /** |
| 1542 | * writeback_inodes_sb - writeback dirty inodes from given super_block |
| 1543 | * @sb: the superblock |
Marcos Paulo de Souza | 786228a | 2011-11-23 20:56:45 +0800 | [diff] [blame] | 1544 | * @reason: reason why some writeback work was initiated |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1545 | * |
| 1546 | * Start writeback on some inodes on this super_block. No guarantees are made |
| 1547 | * on how many (if any) will be written, and this function does not wait |
| 1548 | * for the submitted IO to complete. |
| 1549 | */ |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1550 | void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1551 | { |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1552 | return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1553 | } |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1554 | EXPORT_SYMBOL(writeback_inodes_sb); |
| 1555 | |
| 1556 | /** |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1557 | * try_to_writeback_inodes_sb_nr - try to start writeback if none underway |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1558 | * @sb: the superblock |
| 1559 | * @nr: the number of pages to write |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1560 | * @reason: reason why writeback was initiated |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1561 | * |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1562 | * Invoke writeback_inodes_sb_nr() if no writeback is currently underway. |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1563 | * Returns 1 if writeback was started, 0 if not. |
| 1564 | */ |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1565 | int try_to_writeback_inodes_sb_nr(struct super_block *sb, |
| 1566 | unsigned long nr, |
| 1567 | enum wb_reason reason) |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1568 | { |
Tejun Heo | bc05873 | 2015-05-22 17:13:53 -0400 | [diff] [blame] | 1569 | if (writeback_in_progress(&sb->s_bdi->wb)) |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1570 | return 1; |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1571 | |
| 1572 | if (!down_read_trylock(&sb->s_umount)) |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1573 | return 0; |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1574 | |
| 1575 | writeback_inodes_sb_nr(sb, nr, reason); |
| 1576 | up_read(&sb->s_umount); |
| 1577 | return 1; |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1578 | } |
Miao Xie | 10ee27a | 2013-01-10 13:47:57 +0800 | [diff] [blame] | 1579 | EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr); |
| 1580 | |
| 1581 | /** |
| 1582 | * try_to_writeback_inodes_sb - try to start writeback if none underway |
| 1583 | * @sb: the superblock |
| 1584 | * @reason: reason why some writeback work was initiated |
| 1585 | * |
| 1586 | * Implemented via try_to_writeback_inodes_sb_nr(). |
| 1587 | * Returns 1 if writeback was started, 0 if not. |
| 1588 | */ |
| 1589 | int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) |
| 1590 | { |
| 1591 | return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); |
| 1592 | } |
| 1593 | EXPORT_SYMBOL(try_to_writeback_inodes_sb); |
Chris Mason | 3259f8b | 2010-10-29 11:16:17 -0400 | [diff] [blame] | 1594 | |
| 1595 | /** |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1596 | * sync_inodes_sb - sync sb inode pages |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 1597 | * @sb: the superblock |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1598 | * |
| 1599 | * This function writes and waits on any dirty inode belonging to this |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 1600 | * super_block. |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1601 | */ |
Jan Kara | 0dc83bd | 2014-02-21 11:19:04 +0100 | [diff] [blame] | 1602 | void sync_inodes_sb(struct super_block *sb) |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1603 | { |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1604 | DECLARE_COMPLETION_ONSTACK(done); |
| 1605 | struct wb_writeback_work work = { |
Christoph Hellwig | 3c4d716 | 2010-06-08 18:14:43 +0200 | [diff] [blame] | 1606 | .sb = sb, |
| 1607 | .sync_mode = WB_SYNC_ALL, |
| 1608 | .nr_pages = LONG_MAX, |
| 1609 | .range_cyclic = 0, |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1610 | .done = &done, |
Curt Wohlgemuth | 0e175a1 | 2011-10-07 21:54:10 -0600 | [diff] [blame] | 1611 | .reason = WB_REASON_SYNC, |
Dave Chinner | 7747bd4 | 2013-07-02 22:38:35 +1000 | [diff] [blame] | 1612 | .for_sync = 1, |
Christoph Hellwig | 3c4d716 | 2010-06-08 18:14:43 +0200 | [diff] [blame] | 1613 | }; |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1614 | struct backing_dev_info *bdi = sb->s_bdi; |
Christoph Hellwig | 3c4d716 | 2010-06-08 18:14:43 +0200 | [diff] [blame] | 1615 | |
Jan Kara | 6eedc70 | 2012-07-03 16:45:27 +0200 | [diff] [blame] | 1616 | /* Nothing to do? */ |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1617 | if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info) |
Jan Kara | 6eedc70 | 2012-07-03 16:45:27 +0200 | [diff] [blame] | 1618 | return; |
Christoph Hellwig | cf37e97 | 2010-06-08 18:14:51 +0200 | [diff] [blame] | 1619 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
| 1620 | |
Tejun Heo | e797291 | 2015-05-22 17:13:48 -0400 | [diff] [blame] | 1621 | wb_queue_work(&bdi->wb, &work); |
Christoph Hellwig | 83ba7b0 | 2010-07-06 08:59:53 +0200 | [diff] [blame] | 1622 | wait_for_completion(&done); |
| 1623 | |
Jens Axboe | b6e5131 | 2009-09-16 15:13:54 +0200 | [diff] [blame] | 1624 | wait_sb_inodes(sb); |
Jens Axboe | d8a8559 | 2009-09-02 12:34:32 +0200 | [diff] [blame] | 1625 | } |
| 1626 | EXPORT_SYMBOL(sync_inodes_sb); |
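This is the workhorse of sync(2): fs/sync.c walks every superblock and calls it for the writable ones, roughly as in this sketch of that era's helper:

static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (!(sb->s_flags & MS_RDONLY))
		sync_inodes_sb(sb);
}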
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1628 | /** |
Andrea Arcangeli | 7f04c26 | 2005-10-30 15:03:05 -0800 | [diff] [blame] | 1629 | * write_inode_now - write an inode to disk |
| 1630 | * @inode: inode to write to disk |
| 1631 | * @sync: whether the write should be synchronous or not |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | * |
Andrea Arcangeli | 7f04c26 | 2005-10-30 15:03:05 -0800 | [diff] [blame] | 1633 | * This function commits an inode to disk immediately if it is dirty. This is |
| 1634 | * primarily needed by knfsd. |
| 1635 | * |
| 1636 | * The caller must either have a ref on the inode or must have set I_WILL_FREE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | int write_inode_now(struct inode *inode, int sync) |
| 1639 | { |
Christoph Hellwig | f758eea | 2011-04-21 18:19:44 -0600 | [diff] [blame] | 1640 | struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | struct writeback_control wbc = { |
| 1642 | .nr_to_write = LONG_MAX, |
Mike Galbraith | 18914b1 | 2008-02-08 04:20:23 -0800 | [diff] [blame] | 1643 | .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, |
OGAWA Hirofumi | 111ebb6 | 2006-06-23 02:03:26 -0700 | [diff] [blame] | 1644 | .range_start = 0, |
| 1645 | .range_end = LLONG_MAX, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | }; |
| 1647 | |
| 1648 | if (!mapping_cap_writeback_dirty(inode->i_mapping)) |
Andrew Morton | 49364ce | 2005-11-07 00:59:15 -0800 | [diff] [blame] | 1649 | wbc.nr_to_write = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | |
| 1651 | might_sleep(); |
Jan Kara | 4f8ad65 | 2012-05-03 14:48:00 +0200 | [diff] [blame] | 1652 | return writeback_single_inode(inode, wb, &wbc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | } |
| 1654 | EXPORT_SYMBOL(write_inode_now); |
| 1655 | |
| 1656 | /** |
| 1657 | * sync_inode - write an inode and its pages to disk. |
| 1658 | * @inode: the inode to sync |
| 1659 | * @wbc: controls the writeback mode |
| 1660 | * |
| 1661 | * sync_inode() will write an inode and its pages to disk. It will also |
| 1662 | * correctly update the inode on its superblock's dirty inode lists and will |
| 1663 | * update inode->i_state. |
| 1664 | * |
| 1665 | * The caller must have a ref on the inode. |
| 1666 | */ |
| 1667 | int sync_inode(struct inode *inode, struct writeback_control *wbc) |
| 1668 | { |
Jan Kara | 4f8ad65 | 2012-05-03 14:48:00 +0200 | [diff] [blame] | 1669 | return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 | } |
| 1671 | EXPORT_SYMBOL(sync_inode); |
Christoph Hellwig | c3765016 | 2010-10-06 10:48:20 +0200 | [diff] [blame] | 1672 | |
| 1673 | /** |
Andrew Morton | c691b9d | 2011-01-13 15:45:48 -0800 | [diff] [blame] | 1674 | * sync_inode_metadata - write an inode to disk |
Christoph Hellwig | c3765016 | 2010-10-06 10:48:20 +0200 | [diff] [blame] | 1675 | * @inode: the inode to sync |
| 1676 | * @wait: wait for I/O to complete. |
| 1677 | * |
Andrew Morton | c691b9d | 2011-01-13 15:45:48 -0800 | [diff] [blame] | 1678 | * Write an inode to disk and adjust its dirty state after completion. |
Christoph Hellwig | c3765016 | 2010-10-06 10:48:20 +0200 | [diff] [blame] | 1679 | * |
| 1680 | * Note: only writes the actual inode, no associated data or other metadata. |
| 1681 | */ |
| 1682 | int sync_inode_metadata(struct inode *inode, int wait) |
| 1683 | { |
| 1684 | struct writeback_control wbc = { |
| 1685 | .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, |
| 1686 | .nr_to_write = 0, /* metadata-only */ |
| 1687 | }; |
| 1688 | |
| 1689 | return sync_inode(inode, &wbc); |
| 1690 | } |
| 1691 | EXPORT_SYMBOL(sync_inode_metadata); |
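A common consumer is the generic fsync path: write and wait on the data pages first, then push the inode itself unless fdatasync() semantics allow skipping it. A simplified sketch modeled on __generic_file_fsync() in fs/libfs.c; example_fsync() is invented and the i_mutex locking around the metadata write is omitted:

static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	/*
	 * fdatasync() may skip the inode write if only non-critical
	 * metadata (e.g. timestamps) is dirty.
	 */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	return sync_inode_metadata(inode, 1);
}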