// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
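/* workqueue onto which per-blkg async_bio_work items are queued; see blkg_async_bio_workfn() */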
static struct workqueue_struct *blkcg_punt_bio_wq;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

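	/* submit outside async_bio_lock; submit_bio() may block */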
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * __blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg))
			return ret_blkg;
		if (pos == blkcg)
			return blkg;
	}
}

/**
 * blkg_lookup_create - find or create a blkg
 * @blkcg: target block cgroup
 * @q: target request_queue
 *
 * This looks up or creates the blkg representing the unique pair
 * of the blkcg and the request_queue.
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);

	if (unlikely(!blkg)) {
		unsigned long flags;

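		/* callers may already have IRQs disabled, so save/restore the IRQ state */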
		spin_lock_irqsave(&q->queue_lock, flags);
		blkg = __blkg_lookup_create(blkcg, q);
		spin_unlock_irqrestore(&q->queue_lock, flags);
	}

	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

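		/* blkcg->lock nests inside the queue lock */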
		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
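 * The printed line is "<device> <value>", where the device name is
 * typically the MAJ:MIN pair, e.g. "8:0 409600".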
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] +
		rwstat->cnt[BLKG_RWSTAT_WRITE] +
		rwstat->cnt[BLKG_RWSTAT_DISCARD];
	seq_printf(sf, "%s Total %llu\n", dname, v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat_sample rwstat = { };

	blkg_rwstat_read((void *)pd + off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample rwstat = { };

	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat_sample rwstat;

	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 * @sum: blkg_rwstat_sample structure containing the results
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
			       int off, struct blkg_rwstat_sample *sum)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	unsigned int i;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_get_disk - parse the MAJ:MIN prefix of a per-blkg config update
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from @input and get and return the matching gendisk.  *@inputp is
 * updated to point past the device node prefix.  Returns an ERR_PTR()
 * value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct gendisk *blkcg_conf_get_disk(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct gendisk *disk;
	int key_len, part;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return ERR_PTR(-ENODEV);
	if (part) {
		put_disk_and_module(disk);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return disk;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
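
/*
 * Usage sketch (not a helper from this file): a policy's cgroup file
 * ->write handler would typically pair the two functions above like
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *
 *	if (!ret) {
 *		... parse ctx.body and update ctx.blkg's policy data ...
 *		blkg_conf_finish(&ctx);
 *	}
 *
 * where "blkcg_policy_foo" stands in for the caller's own blkcg_policy.
 */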

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		char *buf;
		struct blkg_rwstat_sample rwstat;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

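		/* the queue lock covers the online checks in blkg_rwstat_recursive_sum() */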
		spin_lock_irq(&blkg->q->queue_lock);

		blkg_rwstat_recursive_sum(blkg, NULL,
				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
		wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
		dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];

		blkg_rwstat_recursive_sum(blkg, NULL,
				offsetof(struct blkcg_gq, stat_ios), &rwstat);
		rios = rwstat.cnt[BLKG_RWSTAT_READ];
		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];

		spin_unlock_irq(&blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}

		if (has_stats) {
			if (off < size - 1) {
				off += scnprintf(buf+off, size-off, "\n");
				seq_commit(sf, off);
			} else {
				seq_commit(sf, -1);
			}
		}
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base cgwb reference allowing step 2 to be triggered */
	blkcg_cgwb_put(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

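		/* the queue lock nests outside blkcg->lock, so only trylock is safe here */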
		if (spin_trylock(&q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
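	/* insertions into blkg_tree happen under spinlocks, hence the non-blocking GFP flags */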
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

1202 preloaded = !radix_tree_preload(GFP_KERNEL);
1203
Jiang Biaobea54882018-04-19 12:04:26 +08001204 /* Make sure the root blkg exists. */
Tejun Heoec13b1d2015-05-22 17:13:19 -04001205 rcu_read_lock();
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001206 spin_lock_irq(&q->queue_lock);
Jens Axboed708f0d2017-03-29 11:25:48 -06001207 blkg = blkg_create(&blkcg_root, q, new_blkg);
Jiang Biao901932a2018-04-19 12:06:09 +08001208 if (IS_ERR(blkg))
1209 goto err_unlock;
1210 q->root_blkg = blkg;
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001211 spin_unlock_irq(&q->queue_lock);
Tejun Heoec13b1d2015-05-22 17:13:19 -04001212 rcu_read_unlock();
1213
Jens Axboed708f0d2017-03-29 11:25:48 -06001214 if (preloaded)
1215 radix_tree_preload_end();
1216
Josef Bacikd7067512018-07-03 11:15:01 -04001217 ret = blk_iolatency_init(q);
Christoph Hellwig04be60b2018-11-14 17:02:12 +01001218 if (ret)
1219 goto err_destroy_all;
Josef Bacikd7067512018-07-03 11:15:01 -04001220
Tejun Heoec13b1d2015-05-22 17:13:19 -04001221 ret = blk_throtl_init(q);
Christoph Hellwig04be60b2018-11-14 17:02:12 +01001222 if (ret)
1223 goto err_destroy_all;
1224 return 0;
Jiang Biao901932a2018-04-19 12:06:09 +08001225
Christoph Hellwig04be60b2018-11-14 17:02:12 +01001226err_destroy_all:
Christoph Hellwig04be60b2018-11-14 17:02:12 +01001227 blkg_destroy_all(q);
Christoph Hellwig04be60b2018-11-14 17:02:12 +01001228 return ret;
Jiang Biao901932a2018-04-19 12:06:09 +08001229err_unlock:
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001230 spin_unlock_irq(&q->queue_lock);
Jiang Biao901932a2018-04-19 12:06:09 +08001231 rcu_read_unlock();
1232 if (preloaded)
1233 radix_tree_preload_end();
1234 return PTR_ERR(blkg);
Tejun Heo5efd6112012-03-05 13:15:12 -08001235}
1236
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg. If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_exit_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	blkg_destroy_all(q);
	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_queue)
		blk_put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q is frozen while
 * its blkgs are populated with policy_data for @pol.
 *
 * Activation happens with @q frozen, so nobody would be accessing blkgs
 * from the IO path.  Update of each blkg is protected by both queue and
 * blkcg locks so that holding either lock and testing blkcg_policy_enabled()
 * is always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to init parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(&q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out_bypass_end:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(&q->queue_lock);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

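/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): how an already registered policy might be switched on and off
 * for one queue and how its per-blkg data is reached afterwards.
 * "example_policy" and "struct example_blkg_pd" are hypothetical names used
 * only in these sketches; example_policy itself is defined in the sketch
 * following blkcg_policy_register() below.
 */
struct example_blkg_pd {
	struct blkg_policy_data pd;	/* handed back to blkcg by pd_alloc_fn */
	u64 ios_seen;			/* hypothetical per-blkg state */
};

static struct blkcg_policy example_policy;

/* enable the policy on @q: every existing blkg gets an example_blkg_pd */
static int __maybe_unused example_enable_on_queue(struct request_queue *q)
{
	return blkcg_activate_policy(q, &example_policy);
}

/* disable the policy on @q: pd_offline_fn/pd_free_fn run for each blkg */
static void __maybe_unused example_disable_on_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_policy);
}

/* map a blkg to the policy's private data, NULL if the policy is disabled */
static struct example_blkg_pd *__maybe_unused
example_blkg_to_pd(struct blkcg_gq *blkg)
{
	struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);

	return pd ? container_of(pd, struct example_blkg_pd, pd) : NULL;
}
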
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/* make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) {
		ret = -EINVAL;
		goto err_unlock;
	}

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd) {
				ret = -ENOMEM;
				goto err_free_cpds;
			}

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			if (pol->cpd_init_fn)
				pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

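/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): a minimal definition of the hypothetical "example_policy" used
 * in the sketch above.  It only allocates and frees struct example_blkg_pd
 * per blkg; a real policy would also provide cftypes and hook into the IO
 * path.  Registration and unregistration may sleep, so they are done from
 * process context (e.g. module init/exit).
 */
static struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
						 struct request_queue *q,
						 struct blkcg *blkcg)
{
	struct example_blkg_pd *epd = kzalloc(sizeof(*epd), gfp);

	return epd ? &epd->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct example_blkg_pd, pd));
}

static struct blkcg_policy example_policy = {
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};

static int __maybe_unused example_policy_init(void)
{
	return blkcg_policy_register(&example_policy);
}

static void __maybe_unused example_policy_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}
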
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_add(&blkg->async_bios, bio);
	spin_unlock_bh(&blkg->async_bio_lock);

	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
}

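/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): a submitter that wants a bio issued from the blkg's punt
 * workqueue rather than from the current context tags it with
 * REQ_CGROUP_PUNT before submission; blkcg_punt_bio_submit() (a helper in
 * blk-cgroup.h) then hands it to __blkcg_punt_bio_submit() above.  The bio
 * is assumed to be fully formed (bi_disk set) and not yet associated with
 * a blkg.
 */
static void __maybe_unused example_submit_punted(struct bio *bio)
{
	bio_associate_blkg(bio);		/* sets bio->bi_blkg */
	bio->bi_opf |= REQ_CGROUP_PUNT;		/* consumed during submission */
	submit_bio(bio);
}
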
/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay. We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second. The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window. We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled. We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward. blkg->last_use keeps track of
	 * the use_delay counter. The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway. Our delay_nsec
		 * should only ever be growing except here where we subtract
		 * out min(last_delay, 1 second), but lord knows bugs happen
		 * and I'd rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

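/*
 * Worked example for the scaling above (illustrative numbers only): suppose
 * the blkg was last scaled more than a second ago (now - old = 2s), has
 * delay_nsec = 1200ms accumulated, last_delay = 800ms, and use_delay has
 * dropped since last_use (i.e. we have been unthrottled).  Then
 * sub = min(last_delay, now - old) = 800ms, raised to
 * max(sub, last_delay / 2) = 800ms, so delay_nsec becomes 400ms and
 * last_delay becomes cur - sub = 400ms for the next window.
 */
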
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay. This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_, so cap
	 * the delay at 0.25s. If there's 10's of seconds worth of delay then
	 * the tasks will be delayed for 0.25 seconds for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code; it's not meant
 * to be called from anywhere else, as it will actually do the work to throttle
 * the task if it is set up for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are
 * places we call this that may not have that information; the swapping code,
 * for instance, will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling
 * before returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and
 * only throttle once.  If the task needs to be throttled again it'll need to
 * be re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation. This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

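/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): how an IO controller might use the two helpers above.  When an
 * IO misses its latency target the controller charges a penalty to the blkg
 * and, if the offender is in process context, asks for throttling on return
 * to user space.  The fixed penalty is entirely made up for the example;
 * for the delay to actually be applied, the controller must also have
 * marked the blkg with blkcg_use_delay().
 */
static void __maybe_unused example_charge_and_throttle(struct blkcg_gq *blkg,
							struct request_queue *q)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 penalty = 2 * NSEC_PER_MSEC;	/* hypothetical fixed penalty */

	blkcg_add_delay(blkg, now, penalty);
	blkcg_schedule_throttle(q, false);	/* don't charge PSI memstall */
}
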
static int __init blkcg_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_init);

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
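/*
 * Note (added for exposition): with blk-cgroup built in, the parameter above
 * is expected to appear as /sys/module/blk_cgroup/parameters/blkcg_debug_stats
 * and to be settable on the kernel command line as
 * blk_cgroup.blkcg_debug_stats=1; the exact prefix follows the object name,
 * so treat the path as an assumption rather than a guarantee.
 */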