/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of the blkcg implementation
	 * and we shouldn't allow anything to go through for a bypassing
	 * queue.  The following can be removed if blkg lookup is
	 * guaranteed to fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/* allocate and initialize */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

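/*
 * Usage sketch: callers are expected to hold both rcu_read_lock() and the
 * queue lock across the call, matching the assertions above (this mirrors
 * how blkg_conf_prep() below uses it):
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 *	if (IS_ERR(blkg))
 *		... handle -EBUSY (bypassing queue) or -EINVAL ...
 */
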
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for the root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q,
			 const struct blkio_policy_type *pol)
{
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg || !pol)
		return;

	kfree(blkg->pd[pol->plid]);
	blkg->pd[pol->plid] = NULL;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	blkg->pd[pol->plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, the root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * they are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under RCU allows access only to
	 * values local to the group, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

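/*
 * Sketch of the access rule above: under rcu_read_lock() a blkg found via
 * blkg_lookup() may only have its group-local fields inspected; links such
 * as blkg->q must not be followed unless a real reference is held:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... read group-local stats / rate limits only ...
 *	rcu_read_unlock();
 */
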
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkio_policy_type *pol = blkio_policy[i];

			if (pol && pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with a "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method; see the usage sketch below.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       const struct blkio_policy_type *pol, int data,
		       bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol->plid])
			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

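/*
 * Usage sketch for blkcg_print_blkgs(): a policy's read_seq_string method
 * is typically a thin wrapper like the one below.  my_policy and struct
 * my_pdata are hypothetical stand-ins for the policy's own definitions:
 *
 *	static int my_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				 struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
 *				  blkg_prfill_stat, &my_policy,
 *				  offsetof(struct my_pdata, my_stat), false);
 *		return 0;
 *	}
 */
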
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the
 * new value.  This function returns with RCU read locked and must be
 * paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

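/*
 * Usage sketch for the prep/finish pair: a per-blkg config writer parses
 * the input, updates the target blkg and then drops the locks.  The write
 * handler and the pd update below are hypothetical:
 *
 *	static int my_conf_write(struct blkio_cgroup *blkcg, const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		... apply ctx.v to ctx.blkg's policy data ...
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */
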
struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As the
 * blkcg lock is nested inside the q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						       struct blkio_group,
						       blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	mutex_lock(&blkcg_pol_mutex);

	blkcg_bypass_start();

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop);

	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));

	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	mutex_lock(&blkcg_pol_mutex);

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;

	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop);
	blkcg_bypass_end();

	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
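
/*
 * Registration sketch for a policy module.  The names below (my_policy,
 * my_init_group, struct my_pdata, MY_POLICY_ID) are hypothetical; plid
 * must be a valid slot below BLKIO_NR_POLICIES, and ops, plid, pdata_size
 * and cftypes are the fields this file consumes:
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops		= { .blkio_init_group_fn = my_init_group },
 *		.plid		= MY_POLICY_ID,
 *		.pdata_size	= sizeof(struct my_pdata),
 *	};
 *
 *	blkio_policy_register(&my_policy);	(from module_init)
 *	blkio_policy_unregister(&my_policy);	(from module_exit)
 */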