// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

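/*
 * Dump the writeback statistics and dirty thresholds of a bdi's root wb
 * for its debugfs "stats" file.
 */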
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback: %10lu kB\n"
		   "BdiReclaimable: %10lu kB\n"
		   "BdiDirtyThresh: %10lu kB\n"
		   "DirtyThresh: %10lu kB\n"
		   "BackgroundThresh: %10lu kB\n"
		   "BdiDirtied: %10lu kB\n"
		   "BdiWritten: %10lu kB\n"
		   "BdiWriteBandwidth: %10lu kBps\n"
		   "b_dirty: %10lu\n"
		   "b_io: %10lu\n"
		   "b_more_io: %10lu\n"
		   "b_dirty_time: %10lu\n"
		   "bdi_list: %10u\n"
		   "state: %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

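/*
 * sysfs attributes exposed for each bdi device: read_ahead_kb, min_ratio,
 * max_ratio and stable_pages_required.
 */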
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

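/*
 * Initialize a bdi_writeback: dirty inode lists, bandwidth estimates,
 * writeback work machinery and per-cpu counters.  Any partial setup is
 * unwound on failure.
 */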
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested) {
		err = -ENOMEM;
		goto out_put_bdi;
	}

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

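/*
 * Undo wb_init(): free the per-cpu counters and fractional proportion
 * state and drop the references taken on the congested object and bdi.
 */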
static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		spin_unlock_irqrestore(&cgwb_lock, flags);
		return congested;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	refcount_set(&new_congested->refcnt, 1);
	new_congested->__bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	refcount_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
		return;

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->__bdi) {
		rb_erase(&congested->rb_node,
			 &congested->__bdi->cgwb_congested_tree);
		congested->__bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

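/*
 * Final release of a cgroup wb once its percpu refcount has drained:
 * shut it down, drop the memcg/blkcg css references and free it.
 */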
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if cgwb_refcnt becomes zero */
	blkcg_cgwb_put(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

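/*
 * Unlink a cgroup wb from the lookup structures and kill its percpu
 * refcount; the actual teardown happens in cgwb_release_workfn().
 */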
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

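/*
 * Create a new wb for @memcg_css on @bdi and link it into bdi->cgwb_tree,
 * keyed by the memcg css id.  Losing a creation race (-EEXIST) is treated
 * as success.
 */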
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_cgwb_get(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

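/* Set up the cgroup writeback state embedded in @bdi and its root wb. */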
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->__bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	refcount_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);

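/*
 * Walk bdi_tree looking for @id.  Returns the link where a matching node
 * is (or would be) attached; the parent is returned via @parentp.
 */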
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

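/*
 * Register @bdi: create its device, hook up debugfs, assign an id and
 * link it into bdi_tree and bdi_list.
 */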
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
	if (rc)
		return rc;
	/* Leaking owner reference... */
	WARN_ON(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

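/*
 * kref release callback for bdi->refcnt: unregister the bdi if that
 * hasn't happened yet, then free all of its writeback state.
 */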
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	cgwb_bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);