/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkcg);

static struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}
EXPORT_SYMBOL_GPL(bio_blkcg);

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
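
/*
 * For illustration only, a minimal sketch of the expected calling
 * convention (the names my_blkcg, my_q and do_something_with() are
 * hypothetical): the RCU read lock must be held across the lookup and any
 * use of the result, and only per-group data may be accessed - see the
 * comment in __blkg_release():
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(my_blkcg, my_q);
 *	if (blkg)
 *		do_something_with(blkg->pd[pol->plid]);
 *	rcu_read_unlock();
 */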

static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	blkg = __blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/* allocate */
	ret = -ENOMEM;
	blkg = blkg_alloc(blkcg, q);
	if (unlikely(!blkg))
		goto err_put;

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
	return blkg;

err_put:
	css_put(&blkcg->css);
	return ERR_PTR(ret);
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
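
/*
 * For illustration only, a sketch of how a policy would typically wire the
 * helpers above into a cftype->read_seq_string method.  The names
 * example_policy, struct example_pd and its served_bytes field are
 * hypothetical and stand in for a real policy's private data:
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_policy,
 *				  offsetof(struct example_pd, served_bytes),
 *				  true);
 *		return 0;
 *	}
 */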

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
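
/*
 * For illustration only, a sketch of the prep/finish pairing as a policy's
 * cftype->write_string method might use it.  The names example_policy,
 * example_pd() and the limit field are hypothetical; on success ctx.blkg
 * and ctx.v are valid and the queue lock stays held until
 * blkg_conf_finish():
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		example_pd(ctx.blkg)->limit = ctx.v;
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */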

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q);
	rcu_read_unlock();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
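
/*
 * For illustration only, a sketch of how a policy module might define and
 * register itself with the core above.  struct example_pd (with the
 * embedded blkg_policy_data conventionally placed first), example_pd_init()
 * and example_cftypes are hypothetical; blkcg_policy_register() only
 * insists that pd_size covers struct blkg_policy_data:
 *
 *	struct example_pd {
 *		struct blkg_policy_data pd;
 *		u64 served_bytes;
 *	};
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_cftypes,
 *		.pd_init_fn	= example_pd_init,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 */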

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);