/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  It looks up the blkg for the @blkcg - @q pair regardless
 * of @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
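
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller holds the RCU read lock, resolves the bio's blkcg and then
 * looks up the matching blkg to reach its per-policy data.  The policy
 * name "blkcg_policy_foo" below is hypothetical.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), bdev_get_queue(bio->bi_bdev));
 *	if (blkg)
 *		pd = blkg_to_pd(blkg, &blkcg_policy_foo);
 *	rcu_read_unlock();
 */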

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
					       blkcg->css.id, GFP_ATOMIC);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
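
/*
 * Illustrative calling pattern (not part of this file): creation has to
 * happen with both the RCU read lock and the queue lock held, which is
 * how blkg_conf_prep() below uses it and how a policy's bio submission
 * hook would typically do it.  "foo_charge_bio" is a hypothetical name.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(bio_blkcg(bio), q);
 *	if (!IS_ERR(blkg))
 *		foo_charge_bio(blkg, bio);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */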

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
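
/*
 * Sketch of the iteration this helper backs (illustrative, not part of
 * this file): blk_queue_for_each_rl() visits @q->root_rl first and then
 * every per-blkg request_list, e.g. when waking sleepers queue-wide.
 * The exact action shown is only an assumed example.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */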

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
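
/*
 * Illustrative wiring (not part of this file): a policy's cftype
 * seq_show callback would typically drive this helper with one of the
 * prfill helpers below.  "blkcg_policy_foo" and "struct foo_pd" are
 * hypothetical names.
 *
 *	static int foo_print_rwstat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_rwstat, &blkcg_policy_foo,
 *				  offsetof(struct foo_pd, served), true);
 *		return 0;
 *	}
 */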

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
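
/*
 * Illustrative use (not part of this file): a policy that wants
 * hierarchical statistics typically wraps this in its own prfill
 * callback and hands that to blkcg_print_blkgs().  The names below are
 * hypothetical.
 *
 *	static u64 foo_prfill_stat_recursive(struct seq_file *sf,
 *					     struct blkg_policy_data *pd,
 *					     int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 */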

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum.  The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
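
/*
 * Typical calling pattern (illustrative, not part of this file): a
 * policy's configuration write handler brackets the update with
 * blkg_conf_prep()/blkg_conf_finish(), applying the parsed value to the
 * target blkg while both the RCU read lock and the queue lock are held.
 * "blkcg_policy_foo" and "foo_pd()" are hypothetical names.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd(ctx.blkg)->limit = ctx.v;
 *
 *	blkg_conf_finish(&ctx);
 */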

struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	mutex_lock(&blkcg_pol_mutex);
	list_del(&blkcg->all_blkcgs_node);
	mutex_unlock(&blkcg_pol_mutex);

	if (blkcg != &blkcg_root) {
		int i;

		for (i = 0; i < BLKCG_MAX_POLS; i++)
			kfree(blkcg->pd[i]);
		kfree(blkcg);
	}
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg) {
		ret = ERR_PTR(-ENOMEM);
		goto free_blkcg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_size)
			continue;

		BUG_ON(blkcg->pd[i]);
		cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->pd[i] = cpd;
		cpd->plid = i;
		pol->cpd_init_fn(blkcg);
	}

done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		kfree(blkcg->pd[i]);
free_blkcg:
	kfree(blkcg);
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
| 899 | |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 900 | /** |
| 901 | * blkcg_init_queue - initialize blkcg part of request queue |
| 902 | * @q: request_queue to initialize |
| 903 | * |
| 904 | * Called from blk_alloc_queue_node(). Responsible for initializing blkcg |
| 905 | * part of new request_queue @q. |
| 906 | * |
| 907 | * RETURNS: |
| 908 | * 0 on success, -errno on failure. |
| 909 | */ |
| 910 | int blkcg_init_queue(struct request_queue *q) |
| 911 | { |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 912 | struct blkcg_gq *new_blkg, *blkg; |
| 913 | bool preloaded; |
| 914 | int ret; |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 915 | |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 916 | new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); |
| 917 | if (!new_blkg) |
| 918 | return -ENOMEM; |
| 919 | |
| 920 | preloaded = !radix_tree_preload(GFP_KERNEL); |
| 921 | |
| 922 | /*
| 923 | * Make sure the root blkg exists. As @q is bypassing at this
| 924 | * point, blkg_lookup_create() can't be used. Open code the
| 925 | * insertion.
| 926 | */
| 927 | rcu_read_lock(); |
| 928 | spin_lock_irq(q->queue_lock); |
| 929 | blkg = blkg_create(&blkcg_root, q, new_blkg); |
| 930 | spin_unlock_irq(q->queue_lock); |
| 931 | rcu_read_unlock(); |
| 932 | |
| 933 | if (preloaded) |
| 934 | radix_tree_preload_end(); |
| 935 | |
| 936 | if (IS_ERR(blkg)) { |
| 937 | kfree(new_blkg); |
| 938 | return PTR_ERR(blkg); |
| 939 | } |
| 940 | |
| 941 | q->root_blkg = blkg; |
| 942 | q->root_rl.blkg = blkg; |
| 943 | |
| 944 | ret = blk_throtl_init(q); |
| 945 | if (ret) { |
| 946 | spin_lock_irq(q->queue_lock); |
| 947 | blkg_destroy_all(q); |
| 948 | spin_unlock_irq(q->queue_lock); |
| 949 | } |
| 950 | return ret; |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 951 | } |
| 952 | |
| 953 | /** |
| 954 | * blkcg_drain_queue - drain blkcg part of request_queue |
| 955 | * @q: request_queue to drain |
| 956 | * |
| 957 | * Called from blk_drain_queue(). Responsible for draining blkcg part. |
| 958 | */ |
| 959 | void blkcg_drain_queue(struct request_queue *q) |
| 960 | { |
| 961 | lockdep_assert_held(q->queue_lock); |
| 962 | |
Tejun Heo | 0b462c8 | 2014-07-05 18:43:21 -0400 | [diff] [blame] | 963 | /* |
| 964 | * @q could be exiting and already have destroyed all blkgs as |
| 965 | * indicated by NULL root_blkg. If so, don't confuse policies. |
| 966 | */ |
| 967 | if (!q->root_blkg) |
| 968 | return; |
| 969 | |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 970 | blk_throtl_drain(q); |
| 971 | } |
| 972 | |
| 973 | /** |
| 974 | * blkcg_exit_queue - exit and release blkcg part of request_queue |
| 975 | * @q: request_queue being released |
| 976 | * |
| 977 | * Called from blk_release_queue(). Responsible for exiting blkcg part. |
| 978 | */ |
| 979 | void blkcg_exit_queue(struct request_queue *q) |
| 980 | { |
Tejun Heo | 6d18b00 | 2012-04-13 13:11:35 -0700 | [diff] [blame] | 981 | spin_lock_irq(q->queue_lock); |
Tejun Heo | 3c96cb3 | 2012-04-13 13:11:34 -0700 | [diff] [blame] | 982 | blkg_destroy_all(q); |
Tejun Heo | 6d18b00 | 2012-04-13 13:11:35 -0700 | [diff] [blame] | 983 | spin_unlock_irq(q->queue_lock); |
| 984 | |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 985 | blk_throtl_exit(q); |
| 986 | } |
| 987 | |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 988 | /* |
| 989 | * We cannot support shared io contexts, as we have no means to support
| 990 | * two tasks with the same ioc in two different groups without major rework |
| 991 | * of the main cic data structures. For now we allow a task to change |
| 992 | * its cgroup only if it's the only owner of its ioc. |
| 993 | */ |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 994 | static int blkcg_can_attach(struct cgroup_subsys_state *css, |
| 995 | struct cgroup_taskset *tset) |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 996 | { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 997 | struct task_struct *task; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 998 | struct io_context *ioc; |
| 999 | int ret = 0; |
| 1000 | |
| 1001 | /* task_lock() is needed to avoid races with exit_io_context() */ |
Tejun Heo | 924f0d9 | 2014-02-13 06:58:41 -0500 | [diff] [blame] | 1002 | cgroup_taskset_for_each(task, tset) { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1003 | task_lock(task); |
| 1004 | ioc = task->io_context; |
| 1005 | if (ioc && atomic_read(&ioc->nr_tasks) > 1) |
| 1006 | ret = -EINVAL; |
| 1007 | task_unlock(task); |
| 1008 | if (ret) |
| 1009 | break; |
| 1010 | } |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1011 | return ret; |
| 1012 | } |
| 1013 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 1014 | struct cgroup_subsys blkio_cgrp_subsys = { |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1015 | .css_alloc = blkcg_css_alloc, |
| 1016 | .css_offline = blkcg_css_offline, |
| 1017 | .css_free = blkcg_css_free, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1018 | .can_attach = blkcg_can_attach, |
Tejun Heo | 5577964 | 2014-07-15 11:05:09 -0400 | [diff] [blame] | 1019 | .legacy_cftypes = blkcg_files, |
Tejun Heo | 1ced953 | 2014-07-08 18:02:57 -0400 | [diff] [blame] | 1020 | #ifdef CONFIG_MEMCG |
| 1021 | /* |
| 1022 | * This ensures that, if available, memcg is automatically enabled |
| 1023 | * together on the default hierarchy so that the owner cgroup can |
| 1024 | * be retrieved from writeback pages. |
| 1025 | */ |
| 1026 | .depends_on = 1 << memory_cgrp_id, |
| 1027 | #endif |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1028 | }; |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 1029 | EXPORT_SYMBOL_GPL(blkio_cgrp_subsys); |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1030 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1031 | /** |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1032 | * blkcg_activate_policy - activate a blkcg policy on a request_queue |
| 1033 | * @q: request_queue of interest |
| 1034 | * @pol: blkcg policy to activate |
| 1035 | * |
| 1036 | * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through |
| 1037 | * bypass mode to populate its blkgs with policy_data for @pol. |
| 1038 | * |
| 1039 | * Activation happens with @q bypassed, so nobody should be accessing blkgs
| 1040 | * from the IO path. Updates to each blkg are protected by both queue and blkcg
| 1041 | * locks so that holding either lock and testing blkcg_policy_enabled() is |
| 1042 | * always enough for dereferencing policy data. |
| 1043 | * |
| 1044 | * The caller is responsible for synchronizing [de]activations and policy |
| 1045 | * [un]registrations. Returns 0 on success, -errno on failure.
| 1046 | */ |
| 1047 | int blkcg_activate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1048 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1049 | { |
| 1050 | LIST_HEAD(pds); |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1051 | struct blkcg_gq *blkg; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1052 | struct blkg_policy_data *pd, *nd; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1053 | int cnt = 0, ret; |
| 1054 | |
| 1055 | if (blkcg_policy_enabled(q, pol)) |
| 1056 | return 0; |
| 1057 | |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1058 | /* count and allocate policy_data for all existing blkgs */ |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1059 | blk_queue_bypass_start(q); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1060 | spin_lock_irq(q->queue_lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1061 | list_for_each_entry(blkg, &q->blkg_list, q_node) |
| 1062 | cnt++; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1063 | spin_unlock_irq(q->queue_lock); |
| 1064 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1065 | /* allocate per-blkg policy data for all existing blkgs */ |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1066 | while (cnt--) { |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 1067 | pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1068 | if (!pd) { |
| 1069 | ret = -ENOMEM; |
| 1070 | goto out_free; |
| 1071 | } |
| 1072 | list_add_tail(&pd->alloc_node, &pds); |
| 1073 | } |
| 1074 | |
| 1075 | /* |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1076 | * Install the allocated pds. With @q bypassing, no new blkg
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1077 | * should have been created while the queue lock was dropped. |
| 1078 | */ |
| 1079 | spin_lock_irq(q->queue_lock); |
| 1080 | |
| 1081 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1082 | if (WARN_ON(list_empty(&pds))) { |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1083 | /* umm... this shouldn't happen, just abort */ |
| 1084 | ret = -ENOMEM; |
| 1085 | goto out_unlock; |
| 1086 | } |
| 1087 | pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); |
| 1088 | list_del_init(&pd->alloc_node); |
| 1089 | |
| 1090 | /* grab blkcg lock too while installing @pd on @blkg */ |
| 1091 | spin_lock(&blkg->blkcg->lock); |
| 1092 | |
| 1093 | blkg->pd[pol->plid] = pd; |
| 1094 | pd->blkg = blkg; |
Tejun Heo | b276a87 | 2013-01-09 08:05:12 -0800 | [diff] [blame] | 1095 | pd->plid = pol->plid; |
Tejun Heo | f9fcc2d | 2012-04-16 13:57:27 -0700 | [diff] [blame] | 1096 | pol->pd_init_fn(blkg); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1097 | |
| 1098 | spin_unlock(&blkg->blkcg->lock); |
| 1099 | } |
| 1100 | |
| 1101 | __set_bit(pol->plid, q->blkcg_pols); |
| 1102 | ret = 0; |
| 1103 | out_unlock: |
| 1104 | spin_unlock_irq(q->queue_lock); |
| 1105 | out_free: |
| 1106 | blk_queue_bypass_end(q); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1107 | list_for_each_entry_safe(pd, nd, &pds, alloc_node) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1108 | kfree(pd); |
| 1109 | return ret; |
| 1110 | } |
| 1111 | EXPORT_SYMBOL_GPL(blkcg_activate_policy); |
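
For context, a hedged sketch of how the activation side of this interface is typically consumed: a policy enables itself on a queue from its own per-queue init path. Everything prefixed with example_ below (the policy, its per-blkg data, and its init hook) is an illustrative assumption, not something defined in this file.

/*
 * Illustrative only: how a policy might call blkcg_activate_policy().
 * struct example_pd and blkcg_policy_example are hypothetical; the policy
 * itself is defined in the registration sketch further down.
 */
struct example_pd {
	struct blkg_policy_data pd;	/* conventionally embedded first */
	u64 example_limit;		/* policy-private per-blkg state */
};

static struct blkcg_policy blkcg_policy_example;	/* defined below */

static int example_init_queue(struct request_queue *q)
{
	/* allocates and installs pd[plid] on every existing blkg of @q */
	return blkcg_activate_policy(q, &blkcg_policy_example);
}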
| 1112 | |
| 1113 | /** |
| 1114 | * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue |
| 1115 | * @q: request_queue of interest |
| 1116 | * @pol: blkcg policy to deactivate |
| 1117 | * |
| 1118 | * Deactivate @pol on @q. Follows the same synchronization rules as |
| 1119 | * blkcg_activate_policy(). |
| 1120 | */ |
| 1121 | void blkcg_deactivate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1122 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1123 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1124 | struct blkcg_gq *blkg; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1125 | |
| 1126 | if (!blkcg_policy_enabled(q, pol)) |
| 1127 | return; |
| 1128 | |
| 1129 | blk_queue_bypass_start(q); |
| 1130 | spin_lock_irq(q->queue_lock); |
| 1131 | |
| 1132 | __clear_bit(pol->plid, q->blkcg_pols); |
| 1133 | |
| 1134 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
| 1135 | /* grab blkcg lock too while removing @pd from @blkg */ |
| 1136 | spin_lock(&blkg->blkcg->lock); |
| 1137 | |
Tejun Heo | f427d90 | 2013-01-09 08:05:12 -0800 | [diff] [blame] | 1138 | if (pol->pd_offline_fn) |
| 1139 | pol->pd_offline_fn(blkg); |
Tejun Heo | f9fcc2d | 2012-04-16 13:57:27 -0700 | [diff] [blame] | 1140 | if (pol->pd_exit_fn) |
| 1141 | pol->pd_exit_fn(blkg); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1142 | |
| 1143 | kfree(blkg->pd[pol->plid]); |
| 1144 | blkg->pd[pol->plid] = NULL; |
| 1145 | |
| 1146 | spin_unlock(&blkg->blkcg->lock); |
| 1147 | } |
| 1148 | |
| 1149 | spin_unlock_irq(q->queue_lock); |
| 1150 | blk_queue_bypass_end(q); |
| 1151 | } |
| 1152 | EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); |
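
Continuing the same hypothetical policy, teardown is symmetric: the per-queue exit path simply deactivates the policy, and the loop above frees its pd on every blkg.

/* Illustrative only, mirroring example_init_queue() above. */
static void example_exit_queue(struct request_queue *q)
{
	/* clears the enabled bit and frees pd[plid] on every blkg of @q */
	blkcg_deactivate_policy(q, &blkcg_policy_example);
}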
| 1153 | |
| 1154 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1155 | * blkcg_policy_register - register a blkcg policy |
| 1156 | * @pol: blkcg policy to register |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1157 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1158 | * Register @pol with blkcg core. Might sleep and @pol may be modified on |
| 1159 | * successful registration. Returns 0 on success and -errno on failure. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1160 | */ |
Jens Axboe | d5bf029 | 2014-06-22 16:31:56 -0600 | [diff] [blame] | 1161 | int blkcg_policy_register(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1162 | { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1163 | struct blkcg *blkcg; |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1164 | int i, ret; |
Tejun Heo | e8989fa | 2012-03-05 13:15:20 -0800 | [diff] [blame] | 1165 | |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 1166 | if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) |
| 1167 | return -EINVAL; |
| 1168 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1169 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1170 | mutex_lock(&blkcg_pol_mutex); |
| 1171 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1172 | /* find an empty slot */ |
| 1173 | ret = -ENOSPC; |
| 1174 | for (i = 0; i < BLKCG_MAX_POLS; i++) |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1175 | if (!blkcg_policy[i]) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1176 | break; |
| 1177 | if (i >= BLKCG_MAX_POLS) |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1178 | goto err_unlock; |
Tejun Heo | 035d10b | 2012-03-05 13:15:04 -0800 | [diff] [blame] | 1179 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1180 | /* register @pol */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1181 | pol->plid = i; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1182 | blkcg_policy[pol->plid] = pol; |
| 1183 | |
| 1184 | /* allocate and install cpd's */ |
| 1185 | if (pol->cpd_size) { |
| 1186 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1187 | struct blkcg_policy_data *cpd; |
| 1188 | |
| 1189 | cpd = kzalloc(pol->cpd_size, GFP_KERNEL); |
| 1190 | if (!cpd) { |
| 1191 | mutex_unlock(&blkcg_pol_mutex); |
| 1192 | goto err_free_cpds; |
| 1193 | } |
| 1194 | |
| 1195 | blkcg->pd[pol->plid] = cpd; |
| 1196 | cpd->plid = pol->plid; |
| 1197 | pol->cpd_init_fn(blkcg); |
| 1198 | } |
| 1199 | } |
| 1200 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1201 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1202 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1203 | /* everything is in place, add intf files for the new policy */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1204 | if (pol->cftypes) |
Tejun Heo | 2cf669a | 2014-07-15 11:05:09 -0400 | [diff] [blame] | 1205 | WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, |
| 1206 | pol->cftypes)); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1207 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1208 | return 0; |
| 1209 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1210 | err_free_cpds: |
| 1211 | if (pol->cpd_size) { |
| 1212 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1213 | kfree(blkcg->pd[pol->plid]); |
| 1214 | blkcg->pd[pol->plid] = NULL; |
| 1215 | } |
| 1216 | } |
| 1217 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1218 | err_unlock: |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1219 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1220 | mutex_unlock(&blkcg_pol_register_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1221 | return ret; |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1222 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1223 | EXPORT_SYMBOL_GPL(blkcg_policy_register); |
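
To tie the pieces together, here is a hedged sketch of a minimal policy definition and its module-time registration, using only the struct blkcg_policy fields consumed above (pd_size, cpd_size, cftypes and the pd init hook); the example_ identifiers remain hypothetical.

/* Illustrative only: a minimal policy and its registration. */
static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_pd *epd = container_of(blkg->pd[blkcg_policy_example.plid],
					      struct example_pd, pd);

	epd->example_limit = 0;		/* start with no limit configured */
}

static struct blkcg_policy blkcg_policy_example = {
	.pd_size	= sizeof(struct example_pd),	/* must be >= sizeof(struct blkg_policy_data) */
	.cpd_size	= 0,				/* no per-blkcg data in this sketch */
	.cftypes	= NULL,				/* no cgroupfs files in this sketch */
	.pd_init_fn	= example_pd_init,
};

static int __init example_policy_init(void)
{
	/* assigns blkcg_policy_example.plid on success */
	return blkcg_policy_register(&blkcg_policy_example);
}
module_init(example_policy_init);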
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1224 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1225 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1226 | * blkcg_policy_unregister - unregister a blkcg policy |
| 1227 | * @pol: blkcg policy to unregister |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1228 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1229 | * Undo blkcg_policy_register(@pol). Might sleep. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1230 | */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1231 | void blkcg_policy_unregister(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1232 | { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1233 | struct blkcg *blkcg; |
| 1234 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1235 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1236 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1237 | if (WARN_ON(blkcg_policy[pol->plid] != pol)) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1238 | goto out_unlock; |
| 1239 | |
| 1240 | /* kill the intf files first */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1241 | if (pol->cftypes) |
Tejun Heo | 2bb566c | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 1242 | cgroup_rm_cftypes(pol->cftypes); |
Tejun Heo | 44ea53d | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1243 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1244 | /* remove cpds and unregister */ |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1245 | mutex_lock(&blkcg_pol_mutex); |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1246 | |
| 1247 | if (pol->cpd_size) { |
| 1248 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1249 | kfree(blkcg->pd[pol->plid]); |
| 1250 | blkcg->pd[pol->plid] = NULL; |
| 1251 | } |
| 1252 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1253 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame^] | 1254 | |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1255 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1256 | out_unlock: |
| 1257 | mutex_unlock(&blkcg_pol_register_mutex); |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1258 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1259 | EXPORT_SYMBOL_GPL(blkcg_policy_unregister); |
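
And the matching unload path for the hypothetical module simply undoes the registration; once this returns, no blkcg holds per-cgroup data for the policy and its plid slot is free for reuse.

/* Illustrative only: module exit mirroring example_policy_init(). */
static void __exit example_policy_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_example);
}
module_exit(example_policy_exit);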