// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

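/*
 * Dump the writeback state of a single bdi for the debugfs "stats" file
 * created in bdi_debug_register() below.  The inode list walks are done
 * under wb->list_lock; the wb_stat() counters are approximate percpu
 * sums.
 */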
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

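/*
 * sysfs knob: read_ahead_kb is parsed in kilobytes, while bdi->ra_pages
 * is kept in pages, hence the PAGE_SHIFT - 10 conversion.
 */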
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

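/*
 * BDI_SHOW() generates the _show half of a read/write device attribute;
 * the matching name##_store must already be defined when
 * DEVICE_ATTR_RW(name) is expanded.
 */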
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

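/*
 * min_ratio/max_ratio bound this bdi's share of the global dirty
 * threshold; range checking is done by bdi_set_min_ratio() and
 * bdi_set_max_ratio() (in mm/page-writeback.c), which reject
 * out-of-range values.
 */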
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

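/*
 * Workqueue callback behind wb->bw_dwork: refresh the wb's write
 * bandwidth estimate from worker context rather than in the I/O
 * completion path.
 */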
static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

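/*
 * Set up a bdi_writeback: zero the structure, seed the bandwidth fields
 * with INIT_BW and allocate the percpu completion and stat counters.
 * Cgroup wbs (wb != &bdi->wb) pin the bdi for their lifetime.
 */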
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_bdi;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain the work list and shut down the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

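/*
 * Final teardown of a cgwb, run from cgwb_release_wq once the percpu
 * refcount hits zero.  cgwb_release_mutex serializes this against
 * cgwb_bdi_unregister() so shutdown never races with bdi removal.
 */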
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

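/*
 * Unhook a cgwb from the radix tree and the memcg/blkcg lists and start
 * killing its refcount.  The wb stays on offline_cgwbs until the final
 * put runs cgwb_release_workfn() above.
 */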
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

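/*
 * Initialize the cgroup-writeback side of a bdi: the radix tree of
 * cgwbs, the locks that guard wb switching, and the embedded root wb,
 * which is associated with root_mem_cgroup and the root blkcg.
 */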
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback.  Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal.  Instead,
		 * it can be postponed until the next time, when the I/O is
		 * likely to have completed.  If some inodes get re-dirtied
		 * in the meantime, they should eventually be switched to a
		 * new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

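/*
 * Common init for both embedded and dynamically allocated bdis; the
 * ratio defaults (min 0, max 100) leave the bdi's share of the global
 * dirty limit unrestricted until an admin tightens it via sysfs.
 */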
static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

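/*
 * Find the rb_tree slot for @id in bdi_tree.  Returns the link where a
 * matching bdi sits or where one would be inserted; the caller must
 * hold bdi_lock, as asserted below.
 */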
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

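/*
 * Register a bdi: create the sysfs device, hook up debugfs, assign a
 * monotonically increasing id from bdi_id_cursor and insert the bdi
 * into both bdi_tree and bdi_list.
 */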
Jan Kara | 7c4cc30 | 2017-04-12 12:24:49 +0200 | [diff] [blame] | 874 | int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 875 | { |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 876 | struct device *dev; |
Tejun Heo | 34f8fe5 | 2019-08-26 09:06:53 -0700 | [diff] [blame] | 877 | struct rb_node *parent, **p; |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 878 | |
| 879 | if (bdi->dev) /* The driver needs to use separate queues per device */ |
| 880 | return 0; |
| 881 | |
Christoph Hellwig | 6bd87ee | 2020-05-04 14:47:56 +0200 | [diff] [blame] | 882 | vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); |
| 883 | dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 884 | if (IS_ERR(dev)) |
| 885 | return PTR_ERR(dev); |
| 886 | |
Jan Kara | e8cb72b | 2017-03-23 01:36:56 +0100 | [diff] [blame] | 887 | cgwb_bdi_register(bdi); |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 888 | bdi->dev = dev; |
| 889 | |
Jens Axboe | 6d0e482 | 2017-12-21 10:01:30 -0700 | [diff] [blame] | 890 | bdi_debug_register(bdi, dev_name(dev)); |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 891 | set_bit(WB_registered, &bdi->wb.state); |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 892 | |
| 893 | spin_lock_bh(&bdi_lock); |
Tejun Heo | 34f8fe5 | 2019-08-26 09:06:53 -0700 | [diff] [blame] | 894 | |
| 895 | bdi->id = ++bdi_id_cursor; |
| 896 | |
| 897 | p = bdi_lookup_rb_node(bdi->id, &parent); |
| 898 | rb_link_node(&bdi->rb_node, parent, p); |
| 899 | rb_insert_color(&bdi->rb_node, &bdi_tree); |
| 900 | |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 901 | list_add_tail_rcu(&bdi->bdi_list, &bdi_list); |
Tejun Heo | 34f8fe5 | 2019-08-26 09:06:53 -0700 | [diff] [blame] | 902 | |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 903 | spin_unlock_bh(&bdi_lock); |
| 904 | |
| 905 | trace_writeback_bdi_register(bdi); |
| 906 | return 0; |
| 907 | } |
Jan Kara | baf7a61 | 2017-04-12 12:24:25 +0200 | [diff] [blame] | 908 | |
Jan Kara | 7c4cc30 | 2017-04-12 12:24:49 +0200 | [diff] [blame] | 909 | int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...) |
Jan Kara | baf7a61 | 2017-04-12 12:24:25 +0200 | [diff] [blame] | 910 | { |
| 911 | va_list args; |
| 912 | int ret; |
| 913 | |
| 914 | va_start(args, fmt); |
Jan Kara | 7c4cc30 | 2017-04-12 12:24:49 +0200 | [diff] [blame] | 915 | ret = bdi_register_va(bdi, fmt, args); |
Jan Kara | baf7a61 | 2017-04-12 12:24:25 +0200 | [diff] [blame] | 916 | va_end(args); |
| 917 | return ret; |
| 918 | } |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 919 | EXPORT_SYMBOL(bdi_register); |
| 920 | |
Christoph Hellwig | 3c5d202 | 2020-05-04 14:47:59 +0200 | [diff] [blame] | 921 | void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner) |
Dan Williams | df08c32 | 2016-07-31 11:15:13 -0700 | [diff] [blame] | 922 | { |
Christoph Hellwig | 3c5d202 | 2020-05-04 14:47:59 +0200 | [diff] [blame] | 923 | WARN_ON_ONCE(bdi->owner); |
Dan Williams | df08c32 | 2016-07-31 11:15:13 -0700 | [diff] [blame] | 924 | bdi->owner = owner; |
| 925 | get_device(owner); |
Dan Williams | df08c32 | 2016-07-31 11:15:13 -0700 | [diff] [blame] | 926 | } |
Dan Williams | df08c32 | 2016-07-31 11:15:13 -0700 | [diff] [blame] | 927 | |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 928 | /* |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 929 | * Remove bdi from bdi_list, and ensure that it is no longer visible |
Peter Zijlstra | b2e8fb6 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 930 | */ |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 931 | static void bdi_remove_from_list(struct backing_dev_info *bdi) |
Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 932 | { |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 933 | spin_lock_bh(&bdi_lock); |
Tejun Heo | 34f8fe5 | 2019-08-26 09:06:53 -0700 | [diff] [blame] | 934 | rb_erase(&bdi->rb_node, &bdi_tree); |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 935 | list_del_rcu(&bdi->bdi_list); |
| 936 | spin_unlock_bh(&bdi_lock); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 937 | |
Tejun Heo | 4610007 | 2015-05-22 17:13:31 -0400 | [diff] [blame] | 938 | synchronize_rcu_expedited(); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 939 | } |
| 940 | |
Tejun Heo | b02176f | 2015-09-08 12:20:22 -0400 | [diff] [blame] | 941 | void bdi_unregister(struct backing_dev_info *bdi) |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 942 | { |
Christoph Hellwig | 5ed964f | 2021-08-09 16:17:40 +0200 | [diff] [blame] | 943 | del_timer_sync(&bdi->laptop_mode_wb_timer); |
| 944 | |
Tejun Heo | f0054bb | 2015-05-22 17:13:30 -0400 | [diff] [blame] | 945 | /* make sure nobody finds us on the bdi_list anymore */ |
| 946 | bdi_remove_from_list(bdi); |
| 947 | wb_shutdown(&bdi->wb); |
Jan Kara | b1c51af | 2017-03-23 01:36:59 +0100 | [diff] [blame] | 948 | cgwb_bdi_unregister(bdi); |
Rabin Vincent | 7a401a9 | 2011-11-11 13:29:04 +0100 | [diff] [blame] | 949 | |
Christoph Hellwig | c4db59d | 2015-01-20 14:05:00 -0700 | [diff] [blame] | 950 | if (bdi->dev) { |
| 951 | bdi_debug_unregister(bdi); |
| 952 | device_unregister(bdi->dev); |
| 953 | bdi->dev = NULL; |
| 954 | } |
Dan Williams | df08c32 | 2016-07-31 11:15:13 -0700 | [diff] [blame] | 955 | |
| 956 | if (bdi->owner) { |
| 957 | put_device(bdi->owner); |
| 958 | bdi->owner = NULL; |
| 959 | } |
Tejun Heo | b02176f | 2015-09-08 12:20:22 -0400 | [diff] [blame] | 960 | } |
Christoph Hellwig | c4db59d | 2015-01-20 14:05:00 -0700 | [diff] [blame] | 961 | |
Jan Kara | d03f6cd | 2017-02-02 15:56:51 +0100 | [diff] [blame] | 962 | static void release_bdi(struct kref *ref) |
| 963 | { |
| 964 | struct backing_dev_info *bdi = |
| 965 | container_of(ref, struct backing_dev_info, refcnt); |
| 966 | |
Jan Kara | 5af110b | 2017-04-12 12:24:26 +0200 | [diff] [blame] | 967 | if (test_bit(WB_registered, &bdi->wb.state)) |
| 968 | bdi_unregister(bdi); |
Jan Kara | 2e82b84 | 2017-04-12 12:24:48 +0200 | [diff] [blame] | 969 | WARN_ON_ONCE(bdi->dev); |
| 970 | wb_exit(&bdi->wb); |
Jan Kara | d03f6cd | 2017-02-02 15:56:51 +0100 | [diff] [blame] | 971 | kfree(bdi); |
| 972 | } |
| 973 | |
| 974 | void bdi_put(struct backing_dev_info *bdi) |
| 975 | { |
| 976 | kref_put(&bdi->refcnt, release_bdi); |
| 977 | } |
Jan Kara | 62bf42a | 2017-04-12 12:24:27 +0200 | [diff] [blame] | 978 | EXPORT_SYMBOL(bdi_put); |
Jan Kara | d03f6cd | 2017-02-02 15:56:51 +0100 | [diff] [blame] | 979 | |
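bdi_put() is the release half of the refcount; the acquire half, bdi_get(), is a static inline in <linux/backing-dev.h>. A borrower that needs the bdi to outlive its current context takes its own reference first, for example:

	struct backing_dev_info *bdi = bdi_get(inode_to_bdi(inode));

	/* ... bdi remains valid even after the inode goes away ... */
	bdi_put(bdi);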
Christoph Hellwig | eb7ae5e | 2020-05-04 14:47:54 +0200 | [diff] [blame] | 980 | const char *bdi_dev_name(struct backing_dev_info *bdi) |
| 981 | { |
| 982 | if (!bdi || !bdi->dev) |
| 983 | return bdi_unknown_name; |
Christoph Hellwig | 6bd87ee | 2020-05-04 14:47:56 +0200 | [diff] [blame] | 984 | return bdi->dev_name; |
Christoph Hellwig | eb7ae5e | 2020-05-04 14:47:54 +0200 | [diff] [blame] | 985 | } |
| 986 | EXPORT_SYMBOL_GPL(bdi_dev_name); |
| 987 | |
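Because bdi_dev_name() degrades to "(unknown)" rather than dereferencing a NULL device, it is safe in tracepoints and error paths that can run before registration completes, e.g.:

	pr_warn("I/O error on backing device %s\n",
		bdi_dev_name(inode_to_bdi(inode)));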
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 988 | static wait_queue_head_t congestion_wqh[2] = { |
| 989 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), |
| 990 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) |
| 991 | }; |
Tejun Heo | ec8a6f2 | 2015-05-22 17:13:41 -0400 | [diff] [blame] | 992 | static atomic_t nr_wb_congested[2]; |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 993 | |
Christoph Hellwig | 492d76b | 2020-07-01 11:06:20 +0200 | [diff] [blame] | 994 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync) |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 995 | { |
Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 996 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
Kaixu Xia | c877ef8 | 2016-03-31 13:19:41 +0000 | [diff] [blame] | 997 | enum wb_congested_state bit; |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 998 | |
Tejun Heo | 4452226 | 2015-05-22 17:13:26 -0400 | [diff] [blame] | 999 | bit = sync ? WB_sync_congested : WB_async_congested; |
Christoph Hellwig | 8c911f3 | 2020-07-01 11:06:21 +0200 | [diff] [blame] | 1000 | if (test_and_clear_bit(bit, &bdi->wb.congested)) |
Tejun Heo | ec8a6f2 | 2015-05-22 17:13:41 -0400 | [diff] [blame] | 1001 | atomic_dec(&nr_wb_congested[sync]); |
Peter Zijlstra | 4e857c5 | 2014-03-17 18:06:10 +0100 | [diff] [blame] | 1002 | smp_mb__after_atomic(); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1003 | if (waitqueue_active(wqh)) |
| 1004 | wake_up(wqh); |
| 1005 | } |
Christoph Hellwig | 492d76b | 2020-07-01 11:06:20 +0200 | [diff] [blame] | 1006 | EXPORT_SYMBOL(clear_bdi_congested); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1007 | |
Christoph Hellwig | 492d76b | 2020-07-01 11:06:20 +0200 | [diff] [blame] | 1008 | void set_bdi_congested(struct backing_dev_info *bdi, int sync) |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1009 | { |
Kaixu Xia | c877ef8 | 2016-03-31 13:19:41 +0000 | [diff] [blame] | 1010 | enum wb_congested_state bit; |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1011 | |
Tejun Heo | 4452226 | 2015-05-22 17:13:26 -0400 | [diff] [blame] | 1012 | bit = sync ? WB_sync_congested : WB_async_congested; |
Christoph Hellwig | 8c911f3 | 2020-07-01 11:06:21 +0200 | [diff] [blame] | 1013 | if (!test_and_set_bit(bit, &bdi->wb.congested)) |
Tejun Heo | ec8a6f2 | 2015-05-22 17:13:41 -0400 | [diff] [blame] | 1014 | atomic_inc(&nr_wb_congested[sync]); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1015 | } |
Christoph Hellwig | 492d76b | 2020-07-01 11:06:20 +0200 | [diff] [blame] | 1016 | EXPORT_SYMBOL(set_bdi_congested); |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1017 | |
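These two helpers are meant to bracket the window in which a device can take no more writeback; @sync is BLK_RW_SYNC or BLK_RW_ASYNC. A stacking-driver sketch with hypothetical high/low water marks, so the congested bit does not flap on every request:

	/* Submission path: queue filled past the high-water mark */
	if (atomic_inc_return(&dev->pending) >= DEV_CONG_ON)
		set_bdi_congested(dev->bdi, BLK_RW_ASYNC);

	/* Completion path: drained below the low-water mark */
	if (atomic_dec_return(&dev->pending) <= DEV_CONG_OFF)
		clear_bdi_congested(dev->bdi, BLK_RW_ASYNC);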
| 1018 | /** |
| 1019 | * congestion_wait - wait for a backing_dev to become uncongested |
Jens Axboe | 8aa7e84 | 2009-07-09 14:52:32 +0200 | [diff] [blame] | 1020 | * @sync: SYNC or ASYNC IO |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1021 | * @timeout: timeout in jiffies |
| 1022 | * |
| 1023 | * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit |
| 1024 | * write congestion. If no backing_devs are congested, just wait for the |
| 1025 | * next write to complete. |
| 1026 | */ |
Jens Axboe | 8aa7e84 | 2009-07-09 14:52:32 +0200 | [diff] [blame] | 1027 | long congestion_wait(int sync, long timeout) |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1028 | { |
| 1029 | long ret; |
Mel Gorman | 52bb919 | 2010-10-26 14:21:41 -0700 | [diff] [blame] | 1030 | unsigned long start = jiffies; |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1031 | DEFINE_WAIT(wait); |
Jens Axboe | 8aa7e84 | 2009-07-09 14:52:32 +0200 | [diff] [blame] | 1032 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1033 | |
| 1034 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); |
| 1035 | ret = io_schedule_timeout(timeout); |
| 1036 | finish_wait(wqh, &wait); |
Mel Gorman | 52bb919 | 2010-10-26 14:21:41 -0700 | [diff] [blame] | 1037 | |
| 1038 | trace_writeback_congestion_wait(jiffies_to_usecs(timeout), |
| 1039 | jiffies_to_usecs(jiffies - start)); |
| 1040 | |
Andrew Morton | 3fcfab1 | 2006-10-19 23:28:16 -0700 | [diff] [blame] | 1041 | return ret; |
| 1042 | } |
| 1043 | EXPORT_SYMBOL(congestion_wait); |
| 1044 | |
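The classic caller pattern retries a failing allocation or write after a short, bounded back-off. This loop shape appears in several filesystems; GFP_NOFS and HZ/50 are conventional choices, not requirements:

	struct page *page;

	while (!(page = find_or_create_page(mapping, index, GFP_NOFS)))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);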
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1045 | /** |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1046 | * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a write to complete |
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1047 | * @sync: SYNC or ASYNC IO |
| 1048 | * @timeout: timeout in jiffies |
| 1049 | * |
Andrey Ryabinin | e3c1ac5 | 2018-04-10 16:28:03 -0700 | [diff] [blame] | 1050 | * In the event of a congested backing_dev (any backing_dev), this waits |
| 1051 | * for up to @timeout jiffies for either a BDI to exit congestion of the |
| 1052 | * given @sync queue or a write to complete. |
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1053 | * |
| 1054 | * The return value is 0 if the sleep is for the full timeout. Otherwise, |
| 1055 | * it is the number of jiffies that were still remaining when the function |
| 1056 | * returned. return_value == timeout implies the function did not sleep. |
| 1057 | */ |
Andrey Ryabinin | e3c1ac5 | 2018-04-10 16:28:03 -0700 | [diff] [blame] | 1058 | long wait_iff_congested(int sync, long timeout) |
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1059 | { |
| 1060 | long ret; |
| 1061 | unsigned long start = jiffies; |
| 1062 | DEFINE_WAIT(wait); |
| 1063 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
| 1064 | |
| 1065 | /* |
Andrey Ryabinin | e3c1ac5 | 2018-04-10 16:28:03 -0700 | [diff] [blame] | 1066 | * If there is no congestion, yield if necessary instead |
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1067 | * of sleeping on the congestion queue |
| 1068 | */ |
Andrey Ryabinin | e3c1ac5 | 2018-04-10 16:28:03 -0700 | [diff] [blame] | 1069 | if (atomic_read(&nr_wb_congested[sync]) == 0) { |
Michal Hocko | ede3771 | 2016-05-20 16:57:03 -0700 | [diff] [blame] | 1070 | cond_resched(); |
Mel Gorman | 599d0c9 | 2016-07-28 15:45:31 -0700 | [diff] [blame] | 1071 | |
Mel Gorman | 0e093d99 | 2010-10-26 14:21:45 -0700 | [diff] [blame] | 1072 | /* In case we scheduled, work out time remaining */ |
| 1073 | ret = timeout - (jiffies - start); |
| 1074 | if (ret < 0) |
| 1075 | ret = 0; |
| 1076 | |
| 1077 | goto out; |
| 1078 | } |
| 1079 | |
| 1080 | /* Sleep until uncongested or a write happens */ |
| 1081 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); |
| 1082 | ret = io_schedule_timeout(timeout); |
| 1083 | finish_wait(wqh, &wait); |
| 1084 | |
| 1085 | out: |
| 1086 | trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout), |
| 1087 | jiffies_to_usecs(jiffies - start)); |
| 1088 | |
| 1089 | return ret; |
| 1090 | } |
| 1091 | EXPORT_SYMBOL(wait_iff_congested); |
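Unlike congestion_wait(), this helper sleeps only while some wb is actually congested, so reclaim can throttle under real writeback pressure without stalling on an idle system. Direct reclaim at this point in history uses it roughly like this:

	/* Back off briefly if async writeback is congested */
	wait_iff_congested(BLK_RW_ASYNC, HZ / 10);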