/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkcg);

static struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}
EXPORT_SYMBOL_GPL(bio_blkcg);
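
/*
 * Usage sketch (illustrative only, not compiled here): bio_blkcg() is
 * typically called under the RCU read lock and falls back to %current's
 * blkcg when the bio carries no css:
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkcg(bio);
 *	... account the bio to blkcg ...
 *	rcu_read_unlock();
 */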

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
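
/*
 * Usage sketch (illustrative only): the fast path looks a blkg up under
 * the RCU read lock and treats %NULL as "no blkg yet or @q bypassing":
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (!blkg)
 *		... slow path: blkg_lookup_create() under queue_lock ...
 *	rcu_read_unlock();
 */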

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
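
/*
 * Slow-path sketch (illustrative only): creation needs both the RCU read
 * lock and queue_lock, matching the assertions above, and callers must be
 * prepared for an ERR_PTR() return while @q is bypassing or dead:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		... -EBUSY means @q is bypassing, retry later ...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */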

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner.  But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock only allows
	 * access to values local to the group, e.g. group stats and group
	 * rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ] = "Read",
		[BLKG_RWSTAT_WRITE] = "Write",
		[BLKG_RWSTAT_SYNC] = "Sync",
		[BLKG_RWSTAT_ASYNC] = "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
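
/*
 * Example (a minimal sketch; the policy, function and field names are
 * hypothetical): a policy wires a read-only stat file up by pointing
 * cftype->read_seq_string at a thin wrapper around blkcg_print_blkgs():
 *
 *	static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_foo, cft->private, false);
 *		return 0;
 *	}
 *
 * with cft->private set to the offset of the blkg_stat inside the
 * policy's private data, e.g. offsetof(struct foo_group, time).
 */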

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
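
/*
 * Example (a minimal sketch; the handler and policy names are
 * hypothetical): a policy's cftype write handler brackets the actual
 * config update with blkg_conf_prep()/blkg_conf_finish():
 *
 *	static int foo_set_weight(struct cgroup *cgrp, struct cftype *cft,
 *				  const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		... apply ctx.v to ctx.blkg's per-policy data ...
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */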

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
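
/*
 * Usage sketch (illustrative only; &blkcg_policy_foo is hypothetical): a
 * policy typically activates itself on a queue from its per-queue init
 * path and fails initialization if activation fails:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *
 * and calls blkcg_deactivate_policy(q, &blkcg_policy_foo) from the
 * matching exit path.
 */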

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
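
/*
 * Example (a minimal sketch; every "foo" name below is hypothetical): a
 * policy embeds struct blkg_policy_data as the first field of its
 * per-group data - blkg_alloc() allocates pd_size bytes and treats the
 * start as a blkg_policy_data - describes itself in a struct
 * blkcg_policy, and registers it from module init:
 *
 *	struct foo_group {
 *		struct blkg_policy_data pd;	(must be first)
 *		u64 limit;
 *	};
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size = sizeof(struct foo_group),
 *		.cftypes = foo_files,
 *		.pd_init_fn = foo_pd_init,
 *		.pd_exit_fn = foo_pd_exit,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */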