// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);	/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
		struct request_queue *q)
{
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		goto found;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return bdi_dev_name(blkg->q->backing_dev_info);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
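
/*
 * Illustrative sketch (not part of this file): a policy typically pairs
 * blkcg_print_blkgs() with a prfill callback from its cftype->seq_show
 * method.  Everything named "my_*" below is hypothetical:
 *
 *	static u64 my_prfill_limit(struct seq_file *sf,
 *				   struct blkg_policy_data *pd, int data)
 *	{
 *		struct my_policy_data *mpd = container_of(pd,
 *					struct my_policy_data, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, mpd->limit);
 *	}
 *
 *	static int my_print_limit(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  my_prfill_limit, &my_policy, 0, false);
 *		return 0;
 *	}
 */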

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_get_disk - get gendisk from the MAJ:MIN prefix of a config string
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from @input and get and return the matching gendisk.  *@inputp is
 * updated to point past the device node prefix.  Returns an ERR_PTR()
 * value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct gendisk *blkcg_conf_get_disk(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct gendisk *disk;
	int key_len, part;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return ERR_PTR(-ENODEV);
	if (part) {
		put_disk_and_module(disk);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return disk;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			blkg_free(new_blkg);
			goto fail_preloaded;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
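
/*
 * Illustrative sketch (not part of this file): a policy's per-device
 * config write handler typically brackets its update with the two
 * helpers above.  Everything named "my_*" below is hypothetical:
 *
 *	static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
 *				    size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			// update ctx.blkg's policy data; the queue lock
 *			// taken by blkg_conf_prep() is still held here
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */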

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
		struct blkg_iostat cur, delta;
		unsigned int seq;

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		/* propagate percpu delta to global */
		u64_stats_update_begin(&blkg->iostat.sync);
		blkg_iostat_set(&delta, &cur);
		blkg_iostat_sub(&delta, &bisc->last);
		blkg_iostat_add(&blkg->iostat.cur, &delta);
		blkg_iostat_add(&bisc->last, &delta);
		u64_stats_update_end(&blkg->iostat.sync);

		/* propagate global delta to parent */
		if (parent) {
			u64_stats_update_begin(&parent->iostat.sync);
			blkg_iostat_set(&delta, &blkg->iostat.cur);
			blkg_iostat_sub(&delta, &blkg->iostat.last);
			blkg_iostat_add(&parent->iostat.cur, &delta);
			blkg_iostat_add(&blkg->iostat.last, &delta);
			u64_stats_update_end(&parent->iostat.sync);
		}
	}

	rcu_read_unlock();
}
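
/*
 * Worked example of the bookkeeping above (numbers purely illustrative):
 * if a CPU's bisc->cur.bytes[BLKG_IOSTAT_READ] was 4096 at the previous
 * flush (so bisc->last holds 4096) and has since grown to 12288, this
 * flush computes delta = 12288 - 4096 = 8192, adds it to blkg->iostat.cur
 * and advances bisc->last to 12288 so the next flush only picks up new
 * growth.  The same last/cur scheme then forwards the blkg-level delta to
 * the parent blkg.
 */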

/*
 * The rstat algorithms intentionally don't handle the root cgroup to avoid
 * incurring overhead when no cgroups are defined. For that reason,
 * cgroup_rstat_flush in blkcg_print_stat does not actually fill out the
 * iostat in the root cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part = disk_get_part(disk, 0);
		struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue);
		struct blkg_iostat tmp;
		int cpu;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(part->dkstats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			// convert sectors to bytes
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;

			u64_stats_update_begin(&blkg->iostat.sync);
			blkg_iostat_set(&blkg->iostat.cur, &tmp);
			u64_stats_update_end(&blkg->iostat.sync);
		}
	}
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkg_iostat_set *bis = &blkg->iostat;
		const char *dname;
		char *buf;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;
		unsigned seq;

		spin_lock_irq(&blkg->q->queue_lock);

		if (!blkg->online)
			goto skip;

		dname = blkg_dev_name(blkg);
		if (!dname)
			goto skip;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		do {
			seq = u64_stats_fetch_begin(&bis->sync);

			rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
			wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
			dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
			rios = bis->cur.ios[BLKG_IOSTAT_READ];
			wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
			dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
		} while (u64_stats_fetch_retry(&bis->sync, seq));

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}

		if (has_stats) {
			if (off < size - 1) {
				off += scnprintf(buf+off, size-off, "\n");
				seq_commit(sf, off);
			} else {
				seq_commit(sf, -1);
			}
		}
	skip:
		spin_unlock_irq(&blkg->q->queue_lock);
	}

	rcu_read_unlock();
	return 0;
}
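
/*
 * For reference, each line emitted by blkcg_print_stat() above looks
 * roughly like (device name and values purely illustrative):
 *
 *	8:0 rbytes=1459200 wbytes=314572800 rios=192 wios=353 dbytes=0 dios=0
 *
 * optionally followed by " use_delay=<n> delay_nsec=<n>" when debug stats
 * are enabled and by whatever each policy's pd_stat_fn() appends.
 */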

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(&q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg *parent = blkcg_parent(blkcg);

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(parent);
	return 0;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_throtl_init(q);
	if (ret)
		goto err_destroy_all;

	ret = blk_iolatency_init(q);
	if (ret) {
		blk_throtl_exit(q);
		goto err_destroy_all;
	}
	return 0;

err_destroy_all:
	blkg_destroy_all(q);
	return ret;
err_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
1211 * two tasks with the same ioc in two different groups without major rework
1212 * of the main cic data structures. For now we allow a task to change
1213 * its cgroup only if it's the only owner of its ioc.
1214 */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05001215static int blkcg_can_attach(struct cgroup_taskset *tset)
Vivek Goyal31e4c282009-12-03 12:59:42 -05001216{
Tejun Heobb9d97b2011-12-12 18:12:21 -08001217 struct task_struct *task;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05001218 struct cgroup_subsys_state *dst_css;
Vivek Goyal31e4c282009-12-03 12:59:42 -05001219 struct io_context *ioc;
1220 int ret = 0;
1221
1222 /* task_lock() is needed to avoid races with exit_io_context() */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05001223 cgroup_taskset_for_each(task, dst_css, tset) {
Tejun Heobb9d97b2011-12-12 18:12:21 -08001224 task_lock(task);
1225 ioc = task->io_context;
1226 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1227 ret = -EINVAL;
1228 task_unlock(task);
1229 if (ret)
1230 break;
1231 }
Vivek Goyal31e4c282009-12-03 12:59:42 -05001232 return ret;
1233}
1234
Tejun Heo69d7fde2015-08-18 14:55:36 -07001235static void blkcg_bind(struct cgroup_subsys_state *root_css)
1236{
1237 int i;
1238
1239 mutex_lock(&blkcg_pol_mutex);
1240
1241 for (i = 0; i < BLKCG_MAX_POLS; i++) {
1242 struct blkcg_policy *pol = blkcg_policy[i];
1243 struct blkcg *blkcg;
1244
1245 if (!pol || !pol->cpd_bind_fn)
1246 continue;
1247
1248 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1249 if (blkcg->cpd[pol->plid])
1250 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1251 }
1252 mutex_unlock(&blkcg_pol_mutex);
1253}
1254
Josef Bacikd09d8df2018-07-03 11:14:55 -04001255static void blkcg_exit(struct task_struct *tsk)
1256{
1257 if (tsk->throttle_queue)
1258 blk_put_queue(tsk->throttle_queue);
1259 tsk->throttle_queue = NULL;
1260}
1261
Tejun Heoc165b3e2015-08-18 14:55:29 -07001262struct cgroup_subsys io_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08001263 .css_alloc = blkcg_css_alloc,
Tejun Heo4308a432019-07-24 10:37:55 -07001264 .css_online = blkcg_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08001265 .css_offline = blkcg_css_offline,
1266 .css_free = blkcg_css_free,
Tejun Heo3c798392012-04-16 13:57:25 -07001267 .can_attach = blkcg_can_attach,
Tejun Heof7331642019-11-07 11:18:03 -08001268 .css_rstat_flush = blkcg_rstat_flush,
Tejun Heo69d7fde2015-08-18 14:55:36 -07001269 .bind = blkcg_bind,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001270 .dfl_cftypes = blkcg_files,
Tejun Heo880f50e2015-08-18 14:55:30 -07001271 .legacy_cftypes = blkcg_legacy_files,
Tejun Heoc165b3e2015-08-18 14:55:29 -07001272 .legacy_name = "blkio",
Josef Bacikd09d8df2018-07-03 11:14:55 -04001273 .exit = blkcg_exit,
Tejun Heo1ced9532014-07-08 18:02:57 -04001274#ifdef CONFIG_MEMCG
1275 /*
1276 * This ensures that, if available, memcg is automatically enabled
1277 * together on the default hierarchy so that the owner cgroup can
1278 * be retrieved from writeback pages.
1279 */
1280 .depends_on = 1 << memory_cgrp_id,
1281#endif
Tejun Heo676f7c82012-04-01 12:09:55 -07001282};
Tejun Heoc165b3e2015-08-18 14:55:29 -07001283EXPORT_SYMBOL_GPL(io_cgrp_subsys);
Tejun Heo676f7c82012-04-01 12:09:55 -07001284
Tejun Heo8bd435b2012-04-13 13:11:28 -07001285/**
Tejun Heoa2b16932012-04-13 13:11:33 -07001286 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1287 * @q: request_queue of interest
1288 * @pol: blkcg policy to activate
1289 *
1290 * Activate @pol on @q. Requires %GFP_KERNEL context. For blk-mq devices,
1291 * @q is frozen while its blkgs are populated with policy_data for @pol.
1292 *
1293 * Activation happens with @q frozen, so nobody should be accessing blkgs
1294 * from the IO path. Update of each blkg is protected by both queue and blkcg
1295 * locks so that holding either lock and testing blkcg_policy_enabled() is
1296 * always enough for dereferencing policy data.
1297 *
1298 * The caller is responsible for synchronizing [de]activations and policy
1299 * [un]registerations. Returns 0 on success, -errno on failure.
1300 */
1301int blkcg_activate_policy(struct request_queue *q,
Tejun Heo3c798392012-04-16 13:57:25 -07001302 const struct blkcg_policy *pol)
Tejun Heoa2b16932012-04-13 13:11:33 -07001303{
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001304 struct blkg_policy_data *pd_prealloc = NULL;
Tejun Heo9d179b82019-10-15 09:03:47 -07001305 struct blkcg_gq *blkg, *pinned_blkg = NULL;
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001306 int ret;
Tejun Heoa2b16932012-04-13 13:11:33 -07001307
1308 if (blkcg_policy_enabled(q, pol))
1309 return 0;
1310
Jens Axboe344e9ff2018-11-15 12:22:51 -07001311 if (queue_is_mq(q))
Jens Axboebd166ef2017-01-17 06:03:22 -07001312 blk_mq_freeze_queue(q);
Tejun Heo9d179b82019-10-15 09:03:47 -07001313retry:
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001314 spin_lock_irq(&q->queue_lock);
Tejun Heoa2b16932012-04-13 13:11:33 -07001315
Tejun Heo9d179b82019-10-15 09:03:47 -07001316 /* blkg_list is pushed at the head, reverse walk to allocate parents first */
Tejun Heo71c81402019-06-13 15:30:40 -07001317 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001318 struct blkg_policy_data *pd;
Tejun Heoa2b16932012-04-13 13:11:33 -07001319
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001320 if (blkg->pd[pol->plid])
1321 continue;
1322
Tejun Heo9d179b82019-10-15 09:03:47 -07001323 /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
1324 if (blkg == pinned_blkg) {
1325 pd = pd_prealloc;
1326 pd_prealloc = NULL;
1327 } else {
1328 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
1329 blkg->blkcg);
1330 }
1331
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001332 if (!pd) {
Tejun Heo9d179b82019-10-15 09:03:47 -07001333 /*
1334 * GFP_NOWAIT failed. Free the existing one and
1335 * prealloc for @blkg w/ GFP_KERNEL.
1336 */
1337 if (pinned_blkg)
1338 blkg_put(pinned_blkg);
1339 blkg_get(blkg);
1340 pinned_blkg = blkg;
1341
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001342 spin_unlock_irq(&q->queue_lock);
Tejun Heo9d179b82019-10-15 09:03:47 -07001343
1344 if (pd_prealloc)
1345 pol->pd_free_fn(pd_prealloc);
1346 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
1347 blkg->blkcg);
1348 if (pd_prealloc)
1349 goto retry;
1350 else
1351 goto enomem;
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001352 }
Tejun Heoa2b16932012-04-13 13:11:33 -07001353
1354 blkg->pd[pol->plid] = pd;
1355 pd->blkg = blkg;
Tejun Heob276a872013-01-09 08:05:12 -08001356 pd->plid = pol->plid;
Tejun Heoa2b16932012-04-13 13:11:33 -07001357 }
1358
Tejun Heo9d179b82019-10-15 09:03:47 -07001359 /* all allocated, init in the same order */
1360 if (pol->pd_init_fn)
1361 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1362 pol->pd_init_fn(blkg->pd[pol->plid]);
1363
Tejun Heoa2b16932012-04-13 13:11:33 -07001364 __set_bit(pol->plid, q->blkcg_pols);
1365 ret = 0;
Tejun Heo4c55f4f2015-08-18 14:55:09 -07001366
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001367 spin_unlock_irq(&q->queue_lock);
Tejun Heo9d179b82019-10-15 09:03:47 -07001368out:
Jens Axboe344e9ff2018-11-15 12:22:51 -07001369 if (queue_is_mq(q))
Jens Axboebd166ef2017-01-17 06:03:22 -07001370 blk_mq_unfreeze_queue(q);
Tejun Heo9d179b82019-10-15 09:03:47 -07001371 if (pinned_blkg)
1372 blkg_put(pinned_blkg);
Tejun Heo001bea72015-08-18 14:55:11 -07001373 if (pd_prealloc)
1374 pol->pd_free_fn(pd_prealloc);
Tejun Heoa2b16932012-04-13 13:11:33 -07001375 return ret;
Tejun Heo9d179b82019-10-15 09:03:47 -07001376
1377enomem:
1378 /* alloc failed, nothing's initialized yet, free everything */
1379 spin_lock_irq(&q->queue_lock);
1380 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1381 if (blkg->pd[pol->plid]) {
1382 pol->pd_free_fn(blkg->pd[pol->plid]);
1383 blkg->pd[pol->plid] = NULL;
1384 }
1385 }
1386 spin_unlock_irq(&q->queue_lock);
1387 ret = -ENOMEM;
1388 goto out;
Tejun Heoa2b16932012-04-13 13:11:33 -07001389}
1390EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1391
1392/**
1393 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1394 * @q: request_queue of interest
1395 * @pol: blkcg policy to deactivate
1396 *
1397 * Deactivate @pol on @q. Follows the same synchronization rules as
1398 * blkcg_activate_policy().
1399 */
1400void blkcg_deactivate_policy(struct request_queue *q,
Tejun Heo3c798392012-04-16 13:57:25 -07001401 const struct blkcg_policy *pol)
Tejun Heoa2b16932012-04-13 13:11:33 -07001402{
Tejun Heo3c798392012-04-16 13:57:25 -07001403 struct blkcg_gq *blkg;
Tejun Heoa2b16932012-04-13 13:11:33 -07001404
1405 if (!blkcg_policy_enabled(q, pol))
1406 return;
1407
Jens Axboe344e9ff2018-11-15 12:22:51 -07001408 if (queue_is_mq(q))
Jens Axboebd166ef2017-01-17 06:03:22 -07001409 blk_mq_freeze_queue(q);
Jens Axboebd166ef2017-01-17 06:03:22 -07001410
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001411 spin_lock_irq(&q->queue_lock);
Tejun Heoa2b16932012-04-13 13:11:33 -07001412
1413 __clear_bit(pol->plid, q->blkcg_pols);
1414
1415 list_for_each_entry(blkg, &q->blkg_list, q_node) {
Tejun Heo001bea72015-08-18 14:55:11 -07001416 if (blkg->pd[pol->plid]) {
Dennis Zhou (Facebook)6b065462018-08-31 16:22:42 -04001417 if (pol->pd_offline_fn)
Tejun Heoa9520cd2015-08-18 14:55:14 -07001418 pol->pd_offline_fn(blkg->pd[pol->plid]);
Tejun Heo001bea72015-08-18 14:55:11 -07001419 pol->pd_free_fn(blkg->pd[pol->plid]);
1420 blkg->pd[pol->plid] = NULL;
1421 }
Tejun Heoa2b16932012-04-13 13:11:33 -07001422 }
1423
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001424 spin_unlock_irq(&q->queue_lock);
Jens Axboebd166ef2017-01-17 06:03:22 -07001425
Jens Axboe344e9ff2018-11-15 12:22:51 -07001426 if (queue_is_mq(q))
Jens Axboebd166ef2017-01-17 06:03:22 -07001427 blk_mq_unfreeze_queue(q);
Tejun Heoa2b16932012-04-13 13:11:33 -07001428}
1429EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
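/*
 * Illustrative only, not part of the original file: a minimal sketch of how
 * a policy typically drives the two helpers above from its own per-queue
 * init and exit paths.  The "myqos" names are hypothetical; blk-throttle and
 * blk-iolatency are the real in-tree users of this pattern.
 *
 *	int myqos_init_queue(struct request_queue *q)
 *	{
 *		int ret;
 *
 *		ret = blkcg_activate_policy(q, &blkcg_policy_myqos);
 *		if (ret)
 *			return ret;
 *
 *		// every blkg on @q now carries blkg->pd[blkcg_policy_myqos.plid]
 *		return 0;
 *	}
 *
 *	void myqos_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_myqos);
 *	}
 */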
1430
1431/**
Tejun Heo3c798392012-04-16 13:57:25 -07001432 * blkcg_policy_register - register a blkcg policy
1433 * @pol: blkcg policy to register
Tejun Heo8bd435b2012-04-13 13:11:28 -07001434 *
Tejun Heo3c798392012-04-16 13:57:25 -07001435 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1436 * successful registration. Returns 0 on success and -errno on failure.
Tejun Heo8bd435b2012-04-13 13:11:28 -07001437 */
Jens Axboed5bf0292014-06-22 16:31:56 -06001438int blkcg_policy_register(struct blkcg_policy *pol)
Vivek Goyal3e252062009-12-04 10:36:42 -05001439{
Tejun Heo06b285b2015-07-09 16:39:50 -04001440 struct blkcg *blkcg;
Tejun Heo8bd435b2012-04-13 13:11:28 -07001441 int i, ret;
Tejun Heoe8989fa2012-03-05 13:15:20 -08001442
Tejun Heo838f13b2015-07-09 16:39:47 -04001443 mutex_lock(&blkcg_pol_register_mutex);
Tejun Heobc0d6502012-04-13 13:11:26 -07001444 mutex_lock(&blkcg_pol_mutex);
1445
Tejun Heo8bd435b2012-04-13 13:11:28 -07001446 /* find an empty slot */
1447 ret = -ENOSPC;
1448 for (i = 0; i < BLKCG_MAX_POLS; i++)
Tejun Heo3c798392012-04-16 13:57:25 -07001449 if (!blkcg_policy[i])
Tejun Heo8bd435b2012-04-13 13:11:28 -07001450 break;
Jens Axboe01c5f852018-09-11 10:59:53 -06001451 if (i >= BLKCG_MAX_POLS) {
1452 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
Tejun Heo838f13b2015-07-09 16:39:47 -04001453 goto err_unlock;
Jens Axboe01c5f852018-09-11 10:59:53 -06001454 }
Tejun Heo035d10b2012-03-05 13:15:04 -08001455
weiping zhange8401072017-10-17 23:56:21 +08001456	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1457 if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1458 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1459 goto err_unlock;
1460
Tejun Heo06b285b2015-07-09 16:39:50 -04001461 /* register @pol */
Tejun Heo3c798392012-04-16 13:57:25 -07001462 pol->plid = i;
Tejun Heo06b285b2015-07-09 16:39:50 -04001463 blkcg_policy[pol->plid] = pol;
1464
1465 /* allocate and install cpd's */
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001466 if (pol->cpd_alloc_fn) {
Tejun Heo06b285b2015-07-09 16:39:50 -04001467 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1468 struct blkcg_policy_data *cpd;
1469
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001470 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
Bart Van Asschebbb427e2016-09-29 08:33:30 -07001471 if (!cpd)
Tejun Heo06b285b2015-07-09 16:39:50 -04001472 goto err_free_cpds;
Tejun Heo06b285b2015-07-09 16:39:50 -04001473
Tejun Heo81437642015-08-18 14:55:15 -07001474 blkcg->cpd[pol->plid] = cpd;
1475 cpd->blkcg = blkcg;
Tejun Heo06b285b2015-07-09 16:39:50 -04001476 cpd->plid = pol->plid;
Tejun Heo86a5bba2019-08-28 15:05:52 -07001477 if (pol->cpd_init_fn)
1478 pol->cpd_init_fn(cpd);
Tejun Heo06b285b2015-07-09 16:39:50 -04001479 }
1480 }
1481
Tejun Heo838f13b2015-07-09 16:39:47 -04001482 mutex_unlock(&blkcg_pol_mutex);
Tejun Heo8bd435b2012-04-13 13:11:28 -07001483
Tejun Heo8bd435b2012-04-13 13:11:28 -07001484 /* everything is in place, add intf files for the new policy */
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001485 if (pol->dfl_cftypes)
1486 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1487 pol->dfl_cftypes));
Tejun Heo880f50e2015-08-18 14:55:30 -07001488 if (pol->legacy_cftypes)
Tejun Heoc165b3e2015-08-18 14:55:29 -07001489 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
Tejun Heo880f50e2015-08-18 14:55:30 -07001490 pol->legacy_cftypes));
Tejun Heo838f13b2015-07-09 16:39:47 -04001491 mutex_unlock(&blkcg_pol_register_mutex);
1492 return 0;
1493
Tejun Heo06b285b2015-07-09 16:39:50 -04001494err_free_cpds:
weiping zhang58a9edc2017-10-10 22:53:46 +08001495 if (pol->cpd_free_fn) {
Tejun Heo06b285b2015-07-09 16:39:50 -04001496 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001497 if (blkcg->cpd[pol->plid]) {
1498 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1499 blkcg->cpd[pol->plid] = NULL;
1500 }
Tejun Heo06b285b2015-07-09 16:39:50 -04001501 }
1502 }
1503 blkcg_policy[pol->plid] = NULL;
Tejun Heo838f13b2015-07-09 16:39:47 -04001504err_unlock:
Tejun Heobc0d6502012-04-13 13:11:26 -07001505 mutex_unlock(&blkcg_pol_mutex);
Tejun Heo838f13b2015-07-09 16:39:47 -04001506 mutex_unlock(&blkcg_pol_register_mutex);
Tejun Heo8bd435b2012-04-13 13:11:28 -07001507 return ret;
Vivek Goyal3e252062009-12-04 10:36:42 -05001508}
Tejun Heo3c798392012-04-16 13:57:25 -07001509EXPORT_SYMBOL_GPL(blkcg_policy_register);
Vivek Goyal3e252062009-12-04 10:36:42 -05001510
Tejun Heo8bd435b2012-04-13 13:11:28 -07001511/**
Tejun Heo3c798392012-04-16 13:57:25 -07001512 * blkcg_policy_unregister - unregister a blkcg policy
1513 * @pol: blkcg policy to unregister
Tejun Heo8bd435b2012-04-13 13:11:28 -07001514 *
Tejun Heo3c798392012-04-16 13:57:25 -07001515 * Undo blkcg_policy_register(@pol). Might sleep.
Tejun Heo8bd435b2012-04-13 13:11:28 -07001516 */
Tejun Heo3c798392012-04-16 13:57:25 -07001517void blkcg_policy_unregister(struct blkcg_policy *pol)
Vivek Goyal3e252062009-12-04 10:36:42 -05001518{
Tejun Heo06b285b2015-07-09 16:39:50 -04001519 struct blkcg *blkcg;
1520
Tejun Heo838f13b2015-07-09 16:39:47 -04001521 mutex_lock(&blkcg_pol_register_mutex);
Tejun Heobc0d6502012-04-13 13:11:26 -07001522
Tejun Heo3c798392012-04-16 13:57:25 -07001523 if (WARN_ON(blkcg_policy[pol->plid] != pol))
Tejun Heo8bd435b2012-04-13 13:11:28 -07001524 goto out_unlock;
1525
1526 /* kill the intf files first */
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001527 if (pol->dfl_cftypes)
1528 cgroup_rm_cftypes(pol->dfl_cftypes);
Tejun Heo880f50e2015-08-18 14:55:30 -07001529 if (pol->legacy_cftypes)
1530 cgroup_rm_cftypes(pol->legacy_cftypes);
Tejun Heo44ea53d2012-04-01 14:38:43 -07001531
Tejun Heo06b285b2015-07-09 16:39:50 -04001532 /* remove cpds and unregister */
Tejun Heo838f13b2015-07-09 16:39:47 -04001533 mutex_lock(&blkcg_pol_mutex);
Tejun Heo06b285b2015-07-09 16:39:50 -04001534
weiping zhang58a9edc2017-10-10 22:53:46 +08001535 if (pol->cpd_free_fn) {
Tejun Heo06b285b2015-07-09 16:39:50 -04001536 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001537 if (blkcg->cpd[pol->plid]) {
1538 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1539 blkcg->cpd[pol->plid] = NULL;
1540 }
Tejun Heo06b285b2015-07-09 16:39:50 -04001541 }
1542 }
Tejun Heo3c798392012-04-16 13:57:25 -07001543 blkcg_policy[pol->plid] = NULL;
Tejun Heo06b285b2015-07-09 16:39:50 -04001544
Tejun Heobc0d6502012-04-13 13:11:26 -07001545 mutex_unlock(&blkcg_pol_mutex);
Tejun Heo838f13b2015-07-09 16:39:47 -04001546out_unlock:
1547 mutex_unlock(&blkcg_pol_register_mutex);
Vivek Goyal3e252062009-12-04 10:36:42 -05001548}
Tejun Heo3c798392012-04-16 13:57:25 -07001549EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
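/*
 * Illustrative only, not part of the original file: a minimal sketch of a
 * policy registering itself at module init and unregistering at module exit.
 * The "myqos" names are hypothetical, and a real policy would embed struct
 * blkg_policy_data in its own per-blkg struct.  Note that pd_alloc_fn and
 * pd_free_fn (likewise cpd_alloc_fn/cpd_free_fn) must be supplied as a pair,
 * as checked in blkcg_policy_register() above.
 *
 *	static struct blkg_policy_data *myqos_pd_alloc(gfp_t gfp,
 *			struct request_queue *q, struct blkcg *blkcg)
 *	{
 *		return kzalloc_node(sizeof(struct blkg_policy_data), gfp, q->node);
 *	}
 *
 *	static void myqos_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd);
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_myqos = {
 *		.pd_alloc_fn	= myqos_pd_alloc,
 *		.pd_free_fn	= myqos_pd_free,
 *	};
 *
 *	static int __init myqos_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_myqos);
 *	}
 *
 *	static void __exit myqos_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_myqos);
 *	}
 */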
Josef Bacik903d23f2018-07-03 11:14:52 -04001550
Tejun Heod3f77df2019-06-27 13:39:52 -07001551bool __blkcg_punt_bio_submit(struct bio *bio)
1552{
1553 struct blkcg_gq *blkg = bio->bi_blkg;
1554
1555 /* consume the flag first */
1556 bio->bi_opf &= ~REQ_CGROUP_PUNT;
1557
1558 /* never bounce for the root cgroup */
1559 if (!blkg->parent)
1560 return false;
1561
1562 spin_lock_bh(&blkg->async_bio_lock);
1563 bio_list_add(&blkg->async_bios, bio);
1564 spin_unlock_bh(&blkg->async_bio_lock);
1565
1566 queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1567 return true;
1568}
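/*
 * Illustrative only, not part of the original file: a submitter opts into the
 * punt mechanism by flagging the bio before submission; the flag is consumed
 * by the blkcg_punt_bio_submit() wrapper in the submission path, which calls
 * __blkcg_punt_bio_submit() above.  This is useful when IO is issued on
 * behalf of another cgroup (e.g. filesystem writeback) and the issuing
 * context must not be throttled itself.
 *
 *	bio->bi_opf |= REQ_CGROUP_PUNT;
 *	submit_bio(bio);
 */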
1569
Josef Bacikd09d8df2018-07-03 11:14:55 -04001570/*
1571 * Scale the accumulated delay based on how long it has been since we last
1572 * updated it. This is called both when we are adding delay (in case it has
1573 * been a while since delay was last added) and when we are checking whether
1574 * a task needs to be throttled, so that elapsed time is accounted for.
1575 */
1576static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1577{
1578 u64 old = atomic64_read(&blkg->delay_start);
1579
Tejun Heo54c52e12020-04-13 12:27:55 -04001580 /* negative use_delay means no scaling, see blkcg_set_delay() */
1581 if (atomic_read(&blkg->use_delay) < 0)
1582 return;
1583
Josef Bacikd09d8df2018-07-03 11:14:55 -04001584 /*
1585 * We only want to scale down every second. The idea here is that we
1586 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1587 * time window. We only want to throttle tasks for recent delay that
1588 * has occurred, in 1 second time windows since that's the maximum
1589 * things can be throttled. We save the current delay window in
1590 * blkg->last_delay so we know what amount is still left to be charged
1591 * to the blkg from this point onward. blkg->last_use keeps track of
1592 * the use_delay counter. The idea is if we're unthrottling the blkg we
1593 * are ok with whatever is happening now, and we can take away more of
1594 * the accumulated delay as we've already throttled enough that
1595 * everybody is happy with their IO latencies.
1596 */
1597 if (time_before64(old + NSEC_PER_SEC, now) &&
1598 atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1599 u64 cur = atomic64_read(&blkg->delay_nsec);
1600 u64 sub = min_t(u64, blkg->last_delay, now - old);
1601 int cur_use = atomic_read(&blkg->use_delay);
1602
1603 /*
1604 * We've been unthrottled, subtract a larger chunk of our
1605 * accumulated delay.
1606 */
1607 if (cur_use < blkg->last_use)
1608 sub = max_t(u64, sub, blkg->last_delay >> 1);
1609
1610 /*
1611 * This shouldn't happen, but handle it anyway. Our delay_nsec
1612 * should only ever be growing except here where we subtract out
1613 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1614 * rather not end up with negative numbers.
1615 */
1616 if (unlikely(cur < sub)) {
1617 atomic64_set(&blkg->delay_nsec, 0);
1618 blkg->last_delay = 0;
1619 } else {
1620 atomic64_sub(sub, &blkg->delay_nsec);
1621 blkg->last_delay = cur - sub;
1622 }
1623 blkg->last_use = cur_use;
1624 }
1625}
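/*
 * Illustrative only, not part of the original file: a worked example of the
 * scaling above with assumed numbers.  Say delay_start was last reset 2s
 * ago, delay_nsec holds 600ms of accumulated delay, last_delay is 400ms and
 * use_delay is unchanged.  Then sub = min(400ms, 2s) = 400ms, so delay_nsec
 * drops to 200ms and last_delay becomes 200ms.  Had use_delay decreased
 * (i.e. we were unthrottled), sub would be at least last_delay / 2, shedding
 * the backlog faster.
 */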
1626
1627/*
1628 * This is called when we want to actually walk up the hierarchy and check to
1629 * see if we need to throttle, and then actually throttle if there is some
1630 * accumulated delay. This should only be called upon return to user space so
1631 * we're not holding some lock that would induce a priority inversion.
1632 */
1633static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1634{
Josef Bacikfd112c72019-07-09 14:41:29 -07001635 unsigned long pflags;
Tejun Heo5160a5a2020-09-01 14:52:52 -04001636 bool clamp;
Josef Bacikd09d8df2018-07-03 11:14:55 -04001637 u64 now = ktime_to_ns(ktime_get());
1638 u64 exp;
1639 u64 delay_nsec = 0;
1640 int tok;
1641
1642 while (blkg->parent) {
Tejun Heo5160a5a2020-09-01 14:52:52 -04001643 int use_delay = atomic_read(&blkg->use_delay);
1644
1645 if (use_delay) {
1646 u64 this_delay;
1647
Josef Bacikd09d8df2018-07-03 11:14:55 -04001648 blkcg_scale_delay(blkg, now);
Tejun Heo5160a5a2020-09-01 14:52:52 -04001649 this_delay = atomic64_read(&blkg->delay_nsec);
1650 if (this_delay > delay_nsec) {
1651 delay_nsec = this_delay;
1652 clamp = use_delay > 0;
1653 }
Josef Bacikd09d8df2018-07-03 11:14:55 -04001654 }
1655 blkg = blkg->parent;
1656 }
1657
1658 if (!delay_nsec)
1659 return;
1660
1661 /*
1662 * Let's not sleep for all eternity if we've amassed a huge delay.
1663 * Swapping or metadata IO can accumulate tens of seconds worth of
1664 * delay, and we want userspace to be able to do _something_, so cap the
Tejun Heo5160a5a2020-09-01 14:52:52 -04001665 * delays at 0.25s. If there are tens of seconds worth of delay then the
1666 * tasks will be delayed for 0.25 seconds for every syscall. If
1667 * blkcg_set_delay() was used as indicated by negative use_delay, the
1668 * caller is responsible for regulating the range.
Josef Bacikd09d8df2018-07-03 11:14:55 -04001669 */
Tejun Heo5160a5a2020-09-01 14:52:52 -04001670 if (clamp)
1671 delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
Josef Bacikd09d8df2018-07-03 11:14:55 -04001672
Josef Bacikfd112c72019-07-09 14:41:29 -07001673 if (use_memdelay)
1674 psi_memstall_enter(&pflags);
Josef Bacikd09d8df2018-07-03 11:14:55 -04001675
1676 exp = ktime_add_ns(now, delay_nsec);
1677 tok = io_schedule_prepare();
1678 do {
1679 __set_current_state(TASK_KILLABLE);
1680 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1681 break;
1682 } while (!fatal_signal_pending(current));
1683 io_schedule_finish(tok);
Josef Bacikfd112c72019-07-09 14:41:29 -07001684
1685 if (use_memdelay)
1686 psi_memstall_leave(&pflags);
Josef Bacikd09d8df2018-07-03 11:14:55 -04001687}
1688
1689/**
1690 * blkcg_maybe_throttle_current - throttle the current task if it has been marked for throttling
1691 *
1692 * This is only called if we've been marked with set_notify_resume(). Obviously
1693 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1694 * check to see if current->throttle_queue is set and if not this doesn't do
1695 * anything. This should only ever be called by the resume code; it's not meant
1696 * to be called arbitrarily, as it will actually do the work to
1697 * throttle the task if it is set up for throttling.
1698 */
1699void blkcg_maybe_throttle_current(void)
1700{
1701 struct request_queue *q = current->throttle_queue;
1702 struct cgroup_subsys_state *css;
1703 struct blkcg *blkcg;
1704 struct blkcg_gq *blkg;
1705 bool use_memdelay = current->use_memdelay;
1706
1707 if (!q)
1708 return;
1709
1710 current->throttle_queue = NULL;
1711 current->use_memdelay = false;
1712
1713 rcu_read_lock();
1714 css = kthread_blkcg();
1715 if (css)
1716 blkcg = css_to_blkcg(css);
1717 else
1718 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1719
1720 if (!blkcg)
1721 goto out;
1722 blkg = blkg_lookup(blkcg, q);
1723 if (!blkg)
1724 goto out;
Dennis Zhou7754f662018-12-05 12:10:39 -05001725 if (!blkg_tryget(blkg))
Josef Bacikd09d8df2018-07-03 11:14:55 -04001726 goto out;
1727 rcu_read_unlock();
Josef Bacikd09d8df2018-07-03 11:14:55 -04001728
1729 blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1730 blkg_put(blkg);
Josef Bacikcc7ecc252018-07-31 12:39:03 -04001731 blk_put_queue(q);
Josef Bacikd09d8df2018-07-03 11:14:55 -04001732 return;
1733out:
1734 rcu_read_unlock();
1735 blk_put_queue(q);
1736}
Josef Bacikd09d8df2018-07-03 11:14:55 -04001737
1738/**
1739 * blkcg_schedule_throttle - this task needs to check for throttling
Bart Van Assche537d71b2019-03-20 13:18:45 -07001740 * @q: the request queue IO was submitted on
1741 * @use_memdelay: do we charge this to memory delay for PSI
Josef Bacikd09d8df2018-07-03 11:14:55 -04001742 *
1743 * This is called by the IO controller when we know there's delay accumulated
1744 * for the blkg for this task. We do not pass the blkg because there are places
1745 * we call this that may not have that information; the swapping code, for
1746 * instance, will only have a request_queue at that point. This sets the
1747 * notify_resume for the task to check and see if it requires throttling before
1748 * returning to user space.
1749 *
1750 * We will only schedule once per syscall. You can call this over and over
1751 * again and it will only do the check once upon return to user space, and only
1752 * throttle once. If the task needs to be throttled again it'll need to be
1753 * re-set the next time we see the task.
1754 */
1755void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1756{
1757 if (unlikely(current->flags & PF_KTHREAD))
1758 return;
1759
1760 if (!blk_get_queue(q))
1761 return;
1762
1763 if (current->throttle_queue)
1764 blk_put_queue(current->throttle_queue);
1765 current->throttle_queue = q;
1766 if (use_memdelay)
1767 current->use_memdelay = use_memdelay;
1768 set_notify_resume(current);
1769}
Josef Bacikd09d8df2018-07-03 11:14:55 -04001770
1771/**
1772 * blkcg_add_delay - add delay to this blkg
Bart Van Assche537d71b2019-03-20 13:18:45 -07001773 * @blkg: blkg of interest
1774 * @now: the current time in nanoseconds
1775 * @delta: how many nanoseconds of delay to add
Josef Bacikd09d8df2018-07-03 11:14:55 -04001776 *
1777 * Charge @delta to the blkg's current delay accumulation. This is used to
1778 * throttle tasks if an IO controller thinks we need more throttling.
1779 */
1780void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1781{
Tejun Heo54c52e12020-04-13 12:27:55 -04001782 if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
1783 return;
Josef Bacikd09d8df2018-07-03 11:14:55 -04001784 blkcg_scale_delay(blkg, now);
1785 atomic64_add(delta, &blkg->delay_nsec);
1786}
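/*
 * Illustrative only, not part of the original file: a rough sketch of how an
 * IO controller combines blkcg_add_delay() with blkcg_schedule_throttle().
 * The helper name and the way the penalty is computed are hypothetical;
 * blk-iolatency is the real in-tree user of this pattern.
 *
 *	static void myqos_penalize(struct blkcg_gq *blkg,
 *				   struct request_queue *q, u64 penalty_ns)
 *	{
 *		u64 now = ktime_to_ns(ktime_get());
 *
 *		blkcg_add_delay(blkg, now, penalty_ns);
 *		// arm the resume-time throttle check for the current task
 *		blkcg_schedule_throttle(q, false);
 *	}
 */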
Josef Bacikd09d8df2018-07-03 11:14:55 -04001787
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001788/**
1789 * blkg_tryget_closest - try to get a blkg ref on the closest blkg
Christoph Hellwig13c78632020-06-27 09:31:54 +02001790 * @bio: target bio
1791 * @css: target css
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001792 *
Christoph Hellwig13c78632020-06-27 09:31:54 +02001793 * As the failure mode here is to walk up the blkg tree, this ensure that the
1794 * blkg->parent pointers are always valid. This returns the blkg that it ended
1795 * up taking a reference on or %NULL if no reference was taken.
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001796 */
Christoph Hellwig13c78632020-06-27 09:31:54 +02001797static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
1798 struct cgroup_subsys_state *css)
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001799{
Christoph Hellwig13c78632020-06-27 09:31:54 +02001800 struct blkcg_gq *blkg, *ret_blkg = NULL;
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001801
Christoph Hellwig13c78632020-06-27 09:31:54 +02001802 rcu_read_lock();
1803 blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001804 while (blkg) {
1805 if (blkg_tryget(blkg)) {
1806 ret_blkg = blkg;
1807 break;
1808 }
1809 blkg = blkg->parent;
1810 }
Christoph Hellwig13c78632020-06-27 09:31:54 +02001811 rcu_read_unlock();
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001812
1813 return ret_blkg;
1814}
1815
1816/**
1817 * bio_associate_blkg_from_css - associate a bio with a specified css
1818 * @bio: target bio
1819 * @css: target css
1820 *
1821 * Associate @bio with the blkg found by combining @css and the
1822 * request_queue of @bio. An association failure is handled by walking up
1823 * the blkg tree. Therefore, the blkg associated can be anything between the
1824 * blkg looked up for @css and q->root_blkg. This situation only happens when
1825 * a cgroup is dying; the remaining bios then spill to the closest alive blkg.
1826 *
1827 * A reference will be taken on the blkg and will be released when @bio is
1828 * freed.
1829 */
1830void bio_associate_blkg_from_css(struct bio *bio,
1831 struct cgroup_subsys_state *css)
1832{
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001833 if (bio->bi_blkg)
1834 blkg_put(bio->bi_blkg);
1835
Christoph Hellwiga5b97522020-06-27 09:31:53 +02001836 if (css && css->parent) {
Christoph Hellwig13c78632020-06-27 09:31:54 +02001837 bio->bi_blkg = blkg_tryget_closest(bio, css);
Christoph Hellwiga5b97522020-06-27 09:31:53 +02001838 } else {
Christoph Hellwig13c78632020-06-27 09:31:54 +02001839 blkg_get(bio->bi_disk->queue->root_blkg);
1840 bio->bi_blkg = bio->bi_disk->queue->root_blkg;
Christoph Hellwiga5b97522020-06-27 09:31:53 +02001841 }
Christoph Hellwig28fc5912020-06-27 09:31:51 +02001842}
1843EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
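/*
 * Illustrative only, not part of the original file: a sketch of associating
 * a bio with an explicit css instead of the submitter's cgroup, e.g. when
 * issuing IO on behalf of another cgroup.  How @css is obtained is up to the
 * caller (cgroup writeback, for instance, keeps one per bdi_writeback).
 *
 *	struct cgroup_subsys_state *css;	// css to charge, chosen by caller
 *
 *	bio_set_dev(bio, bdev);
 *	bio_associate_blkg_from_css(bio, css);
 *	submit_bio(bio);
 */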
1844
1845/**
1846 * bio_associate_blkg - associate a bio with a blkg
1847 * @bio: target bio
1848 *
1849 * Associate @bio with the blkg found from the bio's css and request_queue.
1850 * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
1851 * already associated, the css is reused and association redone as the
1852 * request_queue may have changed.
1853 */
1854void bio_associate_blkg(struct bio *bio)
1855{
1856 struct cgroup_subsys_state *css;
1857
1858 rcu_read_lock();
1859
1860 if (bio->bi_blkg)
1861 css = &bio_blkcg(bio)->css;
1862 else
1863 css = blkcg_css();
1864
1865 bio_associate_blkg_from_css(bio, css);
1866
1867 rcu_read_unlock();
1868}
1869EXPORT_SYMBOL_GPL(bio_associate_blkg);
1870
1871/**
1872 * bio_clone_blkg_association - clone blkg association from src to dst bio
1873 * @dst: destination bio
1874 * @src: source bio
1875 */
1876void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1877{
1878 if (src->bi_blkg) {
1879 if (dst->bi_blkg)
1880 blkg_put(dst->bi_blkg);
1881 blkg_get(src->bi_blkg);
1882 dst->bi_blkg = src->bi_blkg;
1883 }
1884}
1885EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
1886
Christoph Hellwigdb18a532020-06-27 09:31:58 +02001887static int blk_cgroup_io_type(struct bio *bio)
1888{
1889 if (op_is_discard(bio->bi_opf))
1890 return BLKG_IOSTAT_DISCARD;
1891 if (op_is_write(bio->bi_opf))
1892 return BLKG_IOSTAT_WRITE;
1893 return BLKG_IOSTAT_READ;
1894}
1895
1896void blk_cgroup_bio_start(struct bio *bio)
1897{
1898 int rwd = blk_cgroup_io_type(bio), cpu;
1899 struct blkg_iostat_set *bis;
1900
1901 cpu = get_cpu();
1902 bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
1903 u64_stats_update_begin(&bis->sync);
1904
1905 /*
1906 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
1907 * bio and we would have already accounted for the size of the bio.
1908 */
1909 if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
1910 bio_set_flag(bio, BIO_CGROUP_ACCT);
Colin Ian King0b8cc252020-06-30 16:54:41 +01001911 bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
Christoph Hellwigdb18a532020-06-27 09:31:58 +02001912 }
1913 bis->cur.ios[rwd]++;
1914
1915 u64_stats_update_end(&bis->sync);
1916 if (cgroup_subsys_on_dfl(io_cgrp_subsys))
1917 cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
1918 put_cpu();
1919}
1920
Tejun Heod3f77df2019-06-27 13:39:52 -07001921static int __init blkcg_init(void)
1922{
1923 blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
1924 WQ_MEM_RECLAIM | WQ_FREEZABLE |
1925 WQ_UNBOUND | WQ_SYSFS, 0);
1926 if (!blkcg_punt_bio_wq)
1927 return -ENOMEM;
1928 return 0;
1929}
1930subsys_initcall(blkcg_init);
1931
Josef Bacik903d23f2018-07-03 11:14:52 -04001932module_param(blkcg_debug_stats, bool, 0644);
1933MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");