/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkcg);

static struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}
EXPORT_SYMBOL_GPL(bio_blkcg);

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static size_t blkg_pd_size(const struct blkcg_policy *pol)
{
	return sizeof(struct blkg_policy_data) + pol->pdata_size;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.pd_exit_fn)
			pol->ops.pd_exit_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(blkg_pd_size(pol), GFP_ATOMIC, q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkcg_policy_enabled(blkg->q, pol))
			pol->ops.pd_init_fn(blkg);
	}

	return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
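
/*
 * Minimal usage sketch (illustrative, not part of the original file;
 * use_blkg() is a hypothetical helper).  Callers hold the RCU read lock
 * across the lookup and any use of the returned blkg:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		use_blkg(blkg);
 *	rcu_read_unlock();
 */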

static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	blkg = __blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);
	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * they are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU lock only allows access
	 * to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->ops.pd_reset_stats_fn)
				pol->ops.pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with a "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
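
/*
 * Sketch of how a policy might wire this into a cftype read handler
 * (illustrative only; example_policy, struct example_pdata and its stat
 * field are hypothetical, not part of this file):
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), blkg_prfill_stat,
 *				  &example_policy,
 *				  offsetof(struct example_pdata, stat),
 *				  false);
 *		return 0;
 *	}
 */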

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
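
/*
 * The intended pairing in a policy's cftype write handler looks roughly
 * like this (sketch; example_policy and apply_limit() are hypothetical):
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(cgroup_to_blkcg(cgrp), &example_policy,
 *				     buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		apply_limit(ctx.blkg, ctx.v);
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */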

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from the IO path.  Update of each blkg is protected by both queue and
 * blkcg locks so that holding either lock and testing
 * blkcg_policy_enabled() is always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q);
	rcu_read_unlock();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(blkg_pd_size(pol), GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->ops.pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
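
/*
 * A policy would typically activate itself while attaching to a queue and
 * deactivate on teardown (sketch; example_policy is hypothetical):
 *
 *	ret = blkcg_activate_policy(q, &example_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &example_policy);
 */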

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->ops.pd_exit_fn)
			pol->ops.pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
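
/*
 * Minimal registration sketch (illustrative; struct example_pdata, the pd
 * callbacks and example_files are hypothetical):
 *
 *	static struct blkcg_policy example_policy = {
 *		.ops = {
 *			.pd_init_fn	= example_pd_init,
 *			.pd_exit_fn	= example_pd_exit,
 *		},
 *		.pdata_size	= sizeof(struct example_pdata),
 *		.cftypes	= example_files,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *	module_init(example_init);
 */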

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);