// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *		      Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);
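
/*
 * Illustrative nesting of the two mutexes during policy [un]registration
 * (a sketch of the ordering described above, not a verbatim call sequence
 * from this file):
 *
 *	mutex_lock(&blkcg_pol_register_mutex);
 *	mutex_lock(&blkcg_pol_mutex);
 *	... install into / remove from blkcg_policy[] ...
 *	mutex_unlock(&blkcg_pol_mutex);
 *	... add / remove cgroup files, outside blkcg_pol_mutex ...
 *	mutex_unlock(&blkcg_pol_register_mutex);
 */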

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

	WARN_ON(!bio_list_empty(&blkg->async_bios));

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}
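
/*
 * Sketch of the access pattern the comment above implies (illustrative
 * only; @q is some live request_queue):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		... read only group-local data, e.g. stats / rate limits ...
 *	rcu_read_unlock();
 */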

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
}
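
/*
 * The producer side of the above drain is the punt path, which queues a
 * bio under the same lock and kicks the work item, roughly (sketch):
 *
 *	spin_lock_bh(&blkg->async_bio_lock);
 *	bio_list_add(&blkg->async_bios, bio);
 *	spin_unlock_bh(&blkg->async_bio_lock);
 *	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
 */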

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto err_free;

	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
	blkg->blkcg = blkcg;

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(&q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(q)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * __blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = __blkg_lookup(parent, q, false);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (IS_ERR(blkg))
			return ret_blkg;
		if (pos == blkcg)
			return blkg;
	}
}

/**
 * blkg_lookup_create - find or create a blkg
 * @blkcg: target block cgroup
 * @q: target request_queue
 *
 * This looks up or creates the blkg representing the unique pair
 * of the blkcg and the request_queue.
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);

	if (unlikely(!blkg)) {
		unsigned long flags;

		spin_lock_irqsave(&q->queue_lock, flags);
		blkg = __blkg_lookup_create(blkcg, q);
		spin_unlock_irqrestore(&q->queue_lock, flags);
	}

	return blkg;
}
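
/*
 * Typical caller pattern (sketch; assumes a helper resolving the current
 * task's blkcg, such as css_to_blkcg(blkcg_css())):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup_create(css_to_blkcg(blkcg_css()), q);
 *	... use @blkg, or pin it beyond the RCU section with blkg_tryget() ...
 *	rcu_read_unlock();
 */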

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	spin_lock_irq(&q->queue_lock);
	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i, cpu;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for_each_possible_cpu(cpu) {
			struct blkg_iostat_set *bis =
				per_cpu_ptr(blkg->iostat_cpu, cpu);
			memset(bis, 0, sizeof(*bis));
		}
		memset(&blkg->iostat, 0, sizeof(blkg->iostat));

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}
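
/*
 * From userspace this is reached through the legacy cgroup file, e.g.
 * (assuming a cgroup v1 blkio hierarchy mounted at the usual place):
 *
 *	echo 1 > /sys/fs/cgroup/blkio/<group>/blkio.reset_stats
 */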

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

| 544 | /** |
| 545 | * __blkg_prfill_u64 - prfill helper for a single u64 value |
| 546 | * @sf: seq_file to print to |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 547 | * @pd: policy private data of interest |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 548 | * @v: value to print |
| 549 | * |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 550 | * Print @v to @sf for the device assocaited with @pd. |
Tejun Heo | d3d32e6 | 2012-04-01 14:38:42 -0700 | [diff] [blame] | 551 | */ |
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
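
/*
 * Example of how a policy might combine the two helpers above
 * (illustrative sketch; "example_policy" and the pd layout are
 * hypothetical, not part of this file):
 *
 *	static u64 example_prfill(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		u64 v = *(u64 *)((void *)pd + off);
 *
 *		return __blkg_prfill_u64(sf, pd, v);
 *	}
 *
 *	static int example_print(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill, &example_policy,
 *				  offsetof(struct example_pd, some_stat),
 *				  true);
 *		return 0;
 *	}
 */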

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);
	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkcg_conf_get_disk - parse the MAJ:MIN prefix and return the gendisk
 * @inputp: input string pointer
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 * from *@inputp and get and return the matching gendisk.  *@inputp is
 * updated to point past the device node prefix.  Returns an ERR_PTR()
 * value on error.
 *
 * Use this function iff blkg_conf_prep() can't be used for some reason.
 */
struct gendisk *blkcg_conf_get_disk(char **inputp)
{
	char *input = *inputp;
	unsigned int major, minor;
	struct gendisk *disk;
	int key_len, part;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return ERR_PTR(-EINVAL);

	input += key_len;
	if (!isspace(*input))
		return ERR_PTR(-EINVAL);
	input = skip_spaces(input);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return ERR_PTR(-ENODEV);
	if (part) {
		put_disk_and_module(disk);
		return ERR_PTR(-ENODEV);
	}

	*inputp = input;
	return disk;
}
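
/*
 * For example, given the input "8:16 max=1048576" (values illustrative),
 * this returns the gendisk for device 8:16 and leaves *@inputp pointing
 * at "max=1048576".
 */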

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(&disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	disk = blkcg_conf_get_disk(&input);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = input;
	return 0;

fail_unlock:
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(&ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
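
/*
 * Typical config-write handler built on the prep/finish pair (sketch;
 * "example_policy" and "example_apply" are hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = example_apply(ctx.blkg, ctx.body);   ... parse body, update pd ...
 *	blkg_conf_finish(&ctx);
 *	return ret ?: nbytes;
 */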

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	cgroup_rstat_flush(blkcg->css.cgroup);
	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		struct blkg_iostat_set *bis = &blkg->iostat;
		const char *dname;
		char *buf;
		u64 rbytes, wbytes, rios, wios, dbytes, dios;
		size_t size = seq_get_buf(sf, &buf), off = 0;
		int i;
		bool has_stats = false;
		unsigned seq;

		spin_lock_irq(&blkg->q->queue_lock);

		if (!blkg->online)
			goto skip;

		dname = blkg_dev_name(blkg);
		if (!dname)
			goto skip;

		/*
		 * Hooray string manipulation, count is the size written NOT
		 * INCLUDING THE \0, so size is now count+1 less than what we
		 * had before, but we want to start writing the next bit from
		 * the \0 so we only add count to buf.
		 */
		off += scnprintf(buf+off, size-off, "%s ", dname);

		do {
			seq = u64_stats_fetch_begin(&bis->sync);

			rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
			wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
			dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
			rios = bis->cur.ios[BLKG_IOSTAT_READ];
			wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
			dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
		} while (u64_stats_fetch_retry(&bis->sync, seq));

		if (rbytes || wbytes || rios || wios) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
					 rbytes, wbytes, rios, wios,
					 dbytes, dios);
		}

		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
			has_stats = true;
			off += scnprintf(buf+off, size-off,
					 " use_delay=%d delay_nsec=%llu",
					 atomic_read(&blkg->use_delay),
					 (unsigned long long)atomic64_read(&blkg->delay_nsec));
		}

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
			size_t written;

			if (!blkg->pd[i] || !pol->pd_stat_fn)
				continue;

			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
			if (written)
				has_stats = true;
			off += written;
		}

		if (has_stats) {
			if (off < size - 1) {
				off += scnprintf(buf+off, size-off, "\n");
				seq_commit(sf, off);
			} else {
				seq_commit(sf, -1);
			}
		}
	skip:
		spin_unlock_irq(&blkg->q->queue_lock);
	}

	rcu_read_unlock();
	return 0;
}
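
/*
 * A resulting io.stat line looks like (values illustrative):
 *
 *	8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 */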

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */
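
/*
 * Compact view of the same three stages (illustrative):
 *
 *	css offline --> wb_blkcg_offline() + blkcg_unpin_online()
 *	  last online pin dropped --> blkcg_destroy_blkgs()
 *	    blkg css refs all dropped --> blkcg_css_free()
 */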

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(blkcg);
}

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(&q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 929 | |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 930 | list_del(&blkcg->all_blkcgs_node); |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 931 | |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 932 | for (i = 0; i < BLKCG_MAX_POLS; i++) |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 933 | if (blkcg->cpd[i]) |
| 934 | blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); |
| 935 | |
| 936 | mutex_unlock(&blkcg_pol_mutex); |
| 937 | |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 938 | kfree(blkcg); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 939 | } |
| 940 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 941 | static struct cgroup_subsys_state * |
| 942 | blkcg_css_alloc(struct cgroup_subsys_state *parent_css) |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 943 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 944 | struct blkcg *blkcg; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 945 | struct cgroup_subsys_state *ret; |
| 946 | int i; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 947 | |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 948 | mutex_lock(&blkcg_pol_mutex); |
| 949 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 950 | if (!parent_css) { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 951 | blkcg = &blkcg_root; |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 952 | } else { |
| 953 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
| 954 | if (!blkcg) { |
| 955 | ret = ERR_PTR(-ENOMEM); |
weiping zhang | 4c18c9e | 2017-08-25 23:49:32 +0800 | [diff] [blame] | 956 | goto unlock; |
Tejun Heo | bc915e6 | 2015-08-18 14:55:08 -0700 | [diff] [blame] | 957 | } |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 958 | } |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 959 | |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 960 | for (i = 0; i < BLKCG_MAX_POLS ; i++) { |
| 961 | struct blkcg_policy *pol = blkcg_policy[i]; |
| 962 | struct blkcg_policy_data *cpd; |
| 963 | |
| 964 | /* |
| 965 | * If the policy hasn't been registered yet, skip it here; |
| 966 | * blkcg_policy_register() allocates its per-cgroup data for |
| 967 | * all existing blkcgs. Otherwise, check if the policy requires |
| 968 | * any specific per-cgroup data: if it does, allocate and initialize it. |
| 969 | */ |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 970 | if (!pol || !pol->cpd_alloc_fn) |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 971 | continue; |
| 972 | |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 973 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 974 | if (!cpd) { |
| 975 | ret = ERR_PTR(-ENOMEM); |
| 976 | goto free_pd_blkcg; |
| 977 | } |
Tejun Heo | 8143764 | 2015-08-18 14:55:15 -0700 | [diff] [blame] | 978 | blkcg->cpd[i] = cpd; |
| 979 | cpd->blkcg = blkcg; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 980 | cpd->plid = i; |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 981 | if (pol->cpd_init_fn) |
| 982 | pol->cpd_init_fn(cpd); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 983 | } |
| 984 | |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 985 | spin_lock_init(&blkcg->lock); |
Tejun Heo | d866dbf | 2019-07-24 10:37:22 -0700 | [diff] [blame^] | 986 | refcount_set(&blkcg->online_pin, 1); |
Tejun Heo | e00f4f4 | 2016-11-21 18:03:32 -0500 | [diff] [blame] | 987 | INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 988 | INIT_HLIST_HEAD(&blkcg->blkg_list); |
Tejun Heo | 52ebea7 | 2015-05-22 17:13:37 -0400 | [diff] [blame] | 989 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 990 | INIT_LIST_HEAD(&blkcg->cgwb_list); |
| 991 | #endif |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 992 | list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); |
| 993 | |
| 994 | mutex_unlock(&blkcg_pol_mutex); |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 995 | return &blkcg->css; |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 996 | |
| 997 | free_pd_blkcg: |
| 998 | for (i--; i >= 0; i--) |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 999 | if (blkcg->cpd[i]) |
| 1000 | blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); |
weiping zhang | 4c18c9e | 2017-08-25 23:49:32 +0800 | [diff] [blame] | 1001 | |
| 1002 | if (blkcg != &blkcg_root) |
| 1003 | kfree(blkcg); |
| 1004 | unlock: |
Tejun Heo | 7876f93 | 2015-07-09 16:39:49 -0400 | [diff] [blame] | 1005 | mutex_unlock(&blkcg_pol_mutex); |
Arianna Avanzini | e48453c | 2015-06-05 23:38:42 +0200 | [diff] [blame] | 1006 | return ret; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1007 | } |
| 1008 | |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1009 | /** |
| 1010 | * blkcg_init_queue - initialize blkcg part of request queue |
| 1011 | * @q: request_queue to initialize |
| 1012 | * |
Christoph Hellwig | 3d745ea | 2020-03-27 09:30:11 +0100 | [diff] [blame] | 1013 | * Called from __blk_alloc_queue(). Responsible for initializing blkcg |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1014 | * part of new request_queue @q. |
| 1015 | * |
| 1016 | * RETURNS: |
| 1017 | * 0 on success, -errno on failure. |
| 1018 | */ |
| 1019 | int blkcg_init_queue(struct request_queue *q) |
| 1020 | { |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1021 | struct blkcg_gq *new_blkg, *blkg; |
| 1022 | bool preloaded; |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1023 | int ret; |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1024 | |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1025 | new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); |
| 1026 | if (!new_blkg) |
| 1027 | return -ENOMEM; |
| 1028 | |
| 1029 | preloaded = !radix_tree_preload(GFP_KERNEL); |
| 1030 | |
Jiang Biao | bea5488 | 2018-04-19 12:04:26 +0800 | [diff] [blame] | 1031 | /* Make sure the root blkg exists. */ |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1032 | rcu_read_lock(); |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1033 | spin_lock_irq(&q->queue_lock); |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1034 | blkg = blkg_create(&blkcg_root, q, new_blkg); |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1035 | if (IS_ERR(blkg)) |
| 1036 | goto err_unlock; |
| 1037 | q->root_blkg = blkg; |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1038 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1039 | rcu_read_unlock(); |
| 1040 | |
Jens Axboe | d708f0d | 2017-03-29 11:25:48 -0600 | [diff] [blame] | 1041 | if (preloaded) |
| 1042 | radix_tree_preload_end(); |
| 1043 | |
Josef Bacik | d706751 | 2018-07-03 11:15:01 -0400 | [diff] [blame] | 1044 | ret = blk_iolatency_init(q); |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1045 | if (ret) |
| 1046 | goto err_destroy_all; |
Josef Bacik | d706751 | 2018-07-03 11:15:01 -0400 | [diff] [blame] | 1047 | |
Tejun Heo | ec13b1d | 2015-05-22 17:13:19 -0400 | [diff] [blame] | 1048 | ret = blk_throtl_init(q); |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1049 | if (ret) |
| 1050 | goto err_destroy_all; |
| 1051 | return 0; |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1052 | |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1053 | err_destroy_all: |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1054 | blkg_destroy_all(q); |
Christoph Hellwig | 04be60b | 2018-11-14 17:02:12 +0100 | [diff] [blame] | 1055 | return ret; |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1056 | err_unlock: |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1057 | spin_unlock_irq(&q->queue_lock); |
Jiang Biao | 901932a | 2018-04-19 12:06:09 +0800 | [diff] [blame] | 1058 | rcu_read_unlock(); |
| 1059 | if (preloaded) |
| 1060 | radix_tree_preload_end(); |
| 1061 | return PTR_ERR(blkg); |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1062 | } |
| 1063 | |
| 1064 | /** |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1065 | * blkcg_exit_queue - exit and release blkcg part of request_queue |
| 1066 | * @q: request_queue being released |
| 1067 | * |
Marcos Paulo de Souza | 7585d50 | 2019-01-25 00:01:42 -0200 | [diff] [blame] | 1068 | * Called from blk_exit_queue(). Responsible for exiting blkcg part. |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1069 | */ |
| 1070 | void blkcg_exit_queue(struct request_queue *q) |
| 1071 | { |
Tejun Heo | 3c96cb3 | 2012-04-13 13:11:34 -0700 | [diff] [blame] | 1072 | blkg_destroy_all(q); |
Tejun Heo | 5efd611 | 2012-03-05 13:15:12 -0800 | [diff] [blame] | 1073 | blk_throtl_exit(q); |
| 1074 | } |
| 1075 | |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1076 | /* |
| 1077 | * We cannot support shared io contexts, as we have no means to support |
| 1078 | * two tasks with the same ioc in two different groups without major rework |
| 1079 | * of the main cic data structures. For now we allow a task to change |
| 1080 | * its cgroup only if it's the only owner of its ioc. |
| 1081 | */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1082 | static int blkcg_can_attach(struct cgroup_taskset *tset) |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1083 | { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1084 | struct task_struct *task; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1085 | struct cgroup_subsys_state *dst_css; |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1086 | struct io_context *ioc; |
| 1087 | int ret = 0; |
| 1088 | |
| 1089 | /* task_lock() is needed to avoid races with exit_io_context() */ |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 1090 | cgroup_taskset_for_each(task, dst_css, tset) { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 1091 | task_lock(task); |
| 1092 | ioc = task->io_context; |
| 1093 | if (ioc && atomic_read(&ioc->nr_tasks) > 1) |
| 1094 | ret = -EINVAL; |
| 1095 | task_unlock(task); |
| 1096 | if (ret) |
| 1097 | break; |
| 1098 | } |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 1099 | return ret; |
| 1100 | } |
| 1101 | |
Tejun Heo | f733164 | 2019-11-07 11:18:03 -0800 | [diff] [blame] | 1102 | static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src) |
| 1103 | { |
| 1104 | int i; |
| 1105 | |
| 1106 | for (i = 0; i < BLKG_IOSTAT_NR; i++) { |
| 1107 | dst->bytes[i] = src->bytes[i]; |
| 1108 | dst->ios[i] = src->ios[i]; |
| 1109 | } |
| 1110 | } |
| 1111 | |
| 1112 | static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src) |
| 1113 | { |
| 1114 | int i; |
| 1115 | |
| 1116 | for (i = 0; i < BLKG_IOSTAT_NR; i++) { |
| 1117 | dst->bytes[i] += src->bytes[i]; |
| 1118 | dst->ios[i] += src->ios[i]; |
| 1119 | } |
| 1120 | } |
| 1121 | |
| 1122 | static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src) |
| 1123 | { |
| 1124 | int i; |
| 1125 | |
| 1126 | for (i = 0; i < BLKG_IOSTAT_NR; i++) { |
| 1127 | dst->bytes[i] -= src->bytes[i]; |
| 1128 | dst->ios[i] -= src->ios[i]; |
| 1129 | } |
| 1130 | } |
| 1131 | |
| 1132 | static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) |
| 1133 | { |
| 1134 | struct blkcg *blkcg = css_to_blkcg(css); |
| 1135 | struct blkcg_gq *blkg; |
| 1136 | |
| 1137 | rcu_read_lock(); |
| 1138 | |
| 1139 | hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { |
| 1140 | struct blkcg_gq *parent = blkg->parent; |
| 1141 | struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); |
| 1142 | struct blkg_iostat cur, delta; |
| 1143 | unsigned seq; |
| 1144 | |
| 1145 | /* fetch the current per-cpu values */ |
| 1146 | do { |
| 1147 | seq = u64_stats_fetch_begin(&bisc->sync); |
| 1148 | blkg_iostat_set(&cur, &bisc->cur); |
| 1149 | } while (u64_stats_fetch_retry(&bisc->sync, seq)); |
| 1150 | |
| 1151 | /* propagate percpu delta to global */ |
| 1152 | u64_stats_update_begin(&blkg->iostat.sync); |
| 1153 | blkg_iostat_set(&delta, &cur); |
| 1154 | blkg_iostat_sub(&delta, &bisc->last); |
| 1155 | blkg_iostat_add(&blkg->iostat.cur, &delta); |
| 1156 | blkg_iostat_add(&bisc->last, &delta); |
| 1157 | u64_stats_update_end(&blkg->iostat.sync); |
| 1158 | |
| 1159 | /* propagate global delta to parent */ |
| 1160 | if (parent) { |
| 1161 | u64_stats_update_begin(&parent->iostat.sync); |
| 1162 | blkg_iostat_set(&delta, &blkg->iostat.cur); |
| 1163 | blkg_iostat_sub(&delta, &blkg->iostat.last); |
| 1164 | blkg_iostat_add(&parent->iostat.cur, &delta); |
| 1165 | blkg_iostat_add(&blkg->iostat.last, &delta); |
| 1166 | u64_stats_update_end(&parent->iostat.sync); |
| 1167 | } |
| 1168 | } |
| 1169 | |
| 1170 | rcu_read_unlock(); |
| 1171 | } |
| 1172 | |
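/*
 * Sketch of the producer side of the u64_stats pattern consumed by
 * blkcg_rstat_flush() above; an assumption for illustration, not code
 * from this file. Whatever charges an IO to a blkg bumps the per-cpu
 * counters under the same sync object, so the flush above can read a
 * consistent bytes/ios snapshot even on 32-bit SMP.
 */
static void example_blkg_iostat_charge(struct blkg_iostat_set *bis,
                                       int op, u64 bytes)
{
        unsigned long flags;

        flags = u64_stats_update_begin_irqsave(&bis->sync);
        bis->cur.bytes[op] += bytes;
        bis->cur.ios[op]++;
        u64_stats_update_end_irqrestore(&bis->sync, flags);
}
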
Tejun Heo | 69d7fde | 2015-08-18 14:55:36 -0700 | [diff] [blame] | 1173 | static void blkcg_bind(struct cgroup_subsys_state *root_css) |
| 1174 | { |
| 1175 | int i; |
| 1176 | |
| 1177 | mutex_lock(&blkcg_pol_mutex); |
| 1178 | |
| 1179 | for (i = 0; i < BLKCG_MAX_POLS; i++) { |
| 1180 | struct blkcg_policy *pol = blkcg_policy[i]; |
| 1181 | struct blkcg *blkcg; |
| 1182 | |
| 1183 | if (!pol || !pol->cpd_bind_fn) |
| 1184 | continue; |
| 1185 | |
| 1186 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) |
| 1187 | if (blkcg->cpd[pol->plid]) |
| 1188 | pol->cpd_bind_fn(blkcg->cpd[pol->plid]); |
| 1189 | } |
| 1190 | mutex_unlock(&blkcg_pol_mutex); |
| 1191 | } |
| 1192 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1193 | static void blkcg_exit(struct task_struct *tsk) |
| 1194 | { |
| 1195 | if (tsk->throttle_queue) |
| 1196 | blk_put_queue(tsk->throttle_queue); |
| 1197 | tsk->throttle_queue = NULL; |
| 1198 | } |
| 1199 | |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1200 | struct cgroup_subsys io_cgrp_subsys = { |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 1201 | .css_alloc = blkcg_css_alloc, |
| 1202 | .css_offline = blkcg_css_offline, |
| 1203 | .css_free = blkcg_css_free, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1204 | .can_attach = blkcg_can_attach, |
Tejun Heo | f733164 | 2019-11-07 11:18:03 -0800 | [diff] [blame] | 1205 | .css_rstat_flush = blkcg_rstat_flush, |
Tejun Heo | 69d7fde | 2015-08-18 14:55:36 -0700 | [diff] [blame] | 1206 | .bind = blkcg_bind, |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1207 | .dfl_cftypes = blkcg_files, |
Tejun Heo | 880f50e | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1208 | .legacy_cftypes = blkcg_legacy_files, |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1209 | .legacy_name = "blkio", |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1210 | .exit = blkcg_exit, |
Tejun Heo | 1ced953 | 2014-07-08 18:02:57 -0400 | [diff] [blame] | 1211 | #ifdef CONFIG_MEMCG |
| 1212 | /* |
| 1213 | * This ensures that, if available, memcg is automatically enabled |
| 1214 | * together on the default hierarchy so that the owner cgroup can |
| 1215 | * be retrieved from writeback pages. |
| 1216 | */ |
| 1217 | .depends_on = 1 << memory_cgrp_id, |
| 1218 | #endif |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1219 | }; |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1220 | EXPORT_SYMBOL_GPL(io_cgrp_subsys); |
Tejun Heo | 676f7c8 | 2012-04-01 12:09:55 -0700 | [diff] [blame] | 1221 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1222 | /** |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1223 | * blkcg_activate_policy - activate a blkcg policy on a request_queue |
| 1224 | * @q: request_queue of interest |
| 1225 | * @pol: blkcg policy to activate |
| 1226 | * |
| 1227 | * Activate @pol on @q. Requires %GFP_KERNEL context. @q is frozen (if it |
| 1228 | * is an mq queue) to populate its blkgs with policy_data for @pol. |
| 1229 | * |
| 1230 | * Activation happens with @q frozen, so nobody would be accessing blkgs |
| 1231 | * from the IO path. Update of each blkg is protected by both queue and blkcg |
| 1232 | * locks so that holding either lock and testing blkcg_policy_enabled() is |
| 1233 | * always enough for dereferencing policy data. |
| 1234 | * |
| 1235 | * The caller is responsible for synchronizing [de]activations and policy |
| 1236 | * [un]registerations. Returns 0 on success, -errno on failure. |
| 1237 | */ |
| 1238 | int blkcg_activate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1239 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1240 | { |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1241 | struct blkg_policy_data *pd_prealloc = NULL; |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1242 | struct blkcg_gq *blkg, *pinned_blkg = NULL; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1243 | int ret; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1244 | |
| 1245 | if (blkcg_policy_enabled(q, pol)) |
| 1246 | return 0; |
| 1247 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1248 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1249 | blk_mq_freeze_queue(q); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1250 | retry: |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1251 | spin_lock_irq(&q->queue_lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1252 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1253 | /* blkg_list is pushed at the head, reverse walk to allocate parents first */ |
Tejun Heo | 71c8140 | 2019-06-13 15:30:40 -0700 | [diff] [blame] | 1254 | list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1255 | struct blkg_policy_data *pd; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1256 | |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1257 | if (blkg->pd[pol->plid]) |
| 1258 | continue; |
| 1259 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1260 | /* If prealloc matches, use it; otherwise try GFP_NOWAIT */ |
| 1261 | if (blkg == pinned_blkg) { |
| 1262 | pd = pd_prealloc; |
| 1263 | pd_prealloc = NULL; |
| 1264 | } else { |
| 1265 | pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, |
| 1266 | blkg->blkcg); |
| 1267 | } |
| 1268 | |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1269 | if (!pd) { |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1270 | /* |
| 1271 | * GFP_NOWAIT failed. Free the existing one and |
| 1272 | * prealloc for @blkg w/ GFP_KERNEL. |
| 1273 | */ |
| 1274 | if (pinned_blkg) |
| 1275 | blkg_put(pinned_blkg); |
| 1276 | blkg_get(blkg); |
| 1277 | pinned_blkg = blkg; |
| 1278 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1279 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1280 | |
| 1281 | if (pd_prealloc) |
| 1282 | pol->pd_free_fn(pd_prealloc); |
| 1283 | pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, |
| 1284 | blkg->blkcg); |
| 1285 | if (pd_prealloc) |
| 1286 | goto retry; |
| 1287 | else |
| 1288 | goto enomem; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1289 | } |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1290 | |
| 1291 | blkg->pd[pol->plid] = pd; |
| 1292 | pd->blkg = blkg; |
Tejun Heo | b276a87 | 2013-01-09 08:05:12 -0800 | [diff] [blame] | 1293 | pd->plid = pol->plid; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1294 | } |
| 1295 | |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1296 | /* all allocated, init in the same order */ |
| 1297 | if (pol->pd_init_fn) |
| 1298 | list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) |
| 1299 | pol->pd_init_fn(blkg->pd[pol->plid]); |
| 1300 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1301 | __set_bit(pol->plid, q->blkcg_pols); |
| 1302 | ret = 0; |
Tejun Heo | 4c55f4f | 2015-08-18 14:55:09 -0700 | [diff] [blame] | 1303 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1304 | spin_unlock_irq(&q->queue_lock); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1305 | out: |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1306 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1307 | blk_mq_unfreeze_queue(q); |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1308 | if (pinned_blkg) |
| 1309 | blkg_put(pinned_blkg); |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1310 | if (pd_prealloc) |
| 1311 | pol->pd_free_fn(pd_prealloc); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1312 | return ret; |
Tejun Heo | 9d179b8 | 2019-10-15 09:03:47 -0700 | [diff] [blame] | 1313 | |
| 1314 | enomem: |
| 1315 | /* alloc failed, nothing's initialized yet, free everything */ |
| 1316 | spin_lock_irq(&q->queue_lock); |
| 1317 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
| 1318 | if (blkg->pd[pol->plid]) { |
| 1319 | pol->pd_free_fn(blkg->pd[pol->plid]); |
| 1320 | blkg->pd[pol->plid] = NULL; |
| 1321 | } |
| 1322 | } |
| 1323 | spin_unlock_irq(&q->queue_lock); |
| 1324 | ret = -ENOMEM; |
| 1325 | goto out; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1326 | } |
| 1327 | EXPORT_SYMBOL_GPL(blkcg_activate_policy); |
| 1328 | |
| 1329 | /** |
| 1330 | * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue |
| 1331 | * @q: request_queue of interest |
| 1332 | * @pol: blkcg policy to deactivate |
| 1333 | * |
| 1334 | * Deactivate @pol on @q. Follows the same synchronization rules as |
| 1335 | * blkcg_activate_policy(). |
| 1336 | */ |
| 1337 | void blkcg_deactivate_policy(struct request_queue *q, |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1338 | const struct blkcg_policy *pol) |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1339 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1340 | struct blkcg_gq *blkg; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1341 | |
| 1342 | if (!blkcg_policy_enabled(q, pol)) |
| 1343 | return; |
| 1344 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1345 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1346 | blk_mq_freeze_queue(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1347 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1348 | spin_lock_irq(&q->queue_lock); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1349 | |
| 1350 | __clear_bit(pol->plid, q->blkcg_pols); |
| 1351 | |
| 1352 | list_for_each_entry(blkg, &q->blkg_list, q_node) { |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1353 | if (blkg->pd[pol->plid]) { |
Dennis Zhou (Facebook) | 6b06546 | 2018-08-31 16:22:42 -0400 | [diff] [blame] | 1354 | if (pol->pd_offline_fn) |
Tejun Heo | a9520cd | 2015-08-18 14:55:14 -0700 | [diff] [blame] | 1355 | pol->pd_offline_fn(blkg->pd[pol->plid]); |
Tejun Heo | 001bea7 | 2015-08-18 14:55:11 -0700 | [diff] [blame] | 1356 | pol->pd_free_fn(blkg->pd[pol->plid]); |
| 1357 | blkg->pd[pol->plid] = NULL; |
| 1358 | } |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1359 | } |
| 1360 | |
Christoph Hellwig | 0d945c1 | 2018-11-15 12:17:28 -0700 | [diff] [blame] | 1361 | spin_unlock_irq(&q->queue_lock); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1362 | |
Jens Axboe | 344e9ff | 2018-11-15 12:22:51 -0700 | [diff] [blame] | 1363 | if (queue_is_mq(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1364 | blk_mq_unfreeze_queue(q); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1365 | } |
| 1366 | EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); |
| 1367 | |
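/*
 * Usage sketch (hypothetical policy code): mirroring what callers such as
 * blk_iolatency_init() do, a policy activates itself on a queue from its
 * init path and deactivates on teardown. example_blkcg_policy is a
 * stand-in for the policy's struct blkcg_policy (defined further below).
 */
static struct blkcg_policy example_blkcg_policy;

static int example_policy_init(struct request_queue *q)
{
        /* allocates and initializes blkg->pd[plid] on every blkg of @q */
        return blkcg_activate_policy(q, &example_blkcg_policy);
}

static void example_policy_exit(struct request_queue *q)
{
        /* offlines and frees blkg->pd[plid] on every blkg of @q */
        blkcg_deactivate_policy(q, &example_blkcg_policy);
}
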
| 1368 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1369 | * blkcg_policy_register - register a blkcg policy |
| 1370 | * @pol: blkcg policy to register |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1371 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1372 | * Register @pol with blkcg core. Might sleep and @pol may be modified on |
| 1373 | * successful registration. Returns 0 on success and -errno on failure. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1374 | */ |
Jens Axboe | d5bf029 | 2014-06-22 16:31:56 -0600 | [diff] [blame] | 1375 | int blkcg_policy_register(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1376 | { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1377 | struct blkcg *blkcg; |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1378 | int i, ret; |
Tejun Heo | e8989fa | 2012-03-05 13:15:20 -0800 | [diff] [blame] | 1379 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1380 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1381 | mutex_lock(&blkcg_pol_mutex); |
| 1382 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1383 | /* find an empty slot */ |
| 1384 | ret = -ENOSPC; |
| 1385 | for (i = 0; i < BLKCG_MAX_POLS; i++) |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1386 | if (!blkcg_policy[i]) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1387 | break; |
Jens Axboe | 01c5f85 | 2018-09-11 10:59:53 -0600 | [diff] [blame] | 1388 | if (i >= BLKCG_MAX_POLS) { |
| 1389 | pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n"); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1390 | goto err_unlock; |
Jens Axboe | 01c5f85 | 2018-09-11 10:59:53 -0600 | [diff] [blame] | 1391 | } |
Tejun Heo | 035d10b | 2012-03-05 13:15:04 -0800 | [diff] [blame] | 1392 | |
weiping zhang | e840107 | 2017-10-17 23:56:21 +0800 | [diff] [blame] | 1393 | /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */ |
| 1394 | if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || |
| 1395 | (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) |
| 1396 | goto err_unlock; |
| 1397 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1398 | /* register @pol */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1399 | pol->plid = i; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1400 | blkcg_policy[pol->plid] = pol; |
| 1401 | |
| 1402 | /* allocate and install cpd's */ |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1403 | if (pol->cpd_alloc_fn) { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1404 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
| 1405 | struct blkcg_policy_data *cpd; |
| 1406 | |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1407 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
Bart Van Assche | bbb427e | 2016-09-29 08:33:30 -0700 | [diff] [blame] | 1408 | if (!cpd) |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1409 | goto err_free_cpds; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1410 | |
Tejun Heo | 8143764 | 2015-08-18 14:55:15 -0700 | [diff] [blame] | 1411 | blkcg->cpd[pol->plid] = cpd; |
| 1412 | cpd->blkcg = blkcg; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1413 | cpd->plid = pol->plid; |
Tejun Heo | 86a5bba | 2019-08-28 15:05:52 -0700 | [diff] [blame] | 1414 | if (pol->cpd_init_fn) |
| 1415 | pol->cpd_init_fn(cpd); |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1416 | } |
| 1417 | } |
| 1418 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1419 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1420 | |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1421 | /* everything is in place, add intf files for the new policy */ |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1422 | if (pol->dfl_cftypes) |
| 1423 | WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys, |
| 1424 | pol->dfl_cftypes)); |
Tejun Heo | 880f50e | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1425 | if (pol->legacy_cftypes) |
Tejun Heo | c165b3e | 2015-08-18 14:55:29 -0700 | [diff] [blame] | 1426 | WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys, |
Tejun Heo | 880f50e | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1427 | pol->legacy_cftypes)); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1428 | mutex_unlock(&blkcg_pol_register_mutex); |
| 1429 | return 0; |
| 1430 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1431 | err_free_cpds: |
weiping zhang | 58a9edc | 2017-10-10 22:53:46 +0800 | [diff] [blame] | 1432 | if (pol->cpd_free_fn) { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1433 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1434 | if (blkcg->cpd[pol->plid]) { |
| 1435 | pol->cpd_free_fn(blkcg->cpd[pol->plid]); |
| 1436 | blkcg->cpd[pol->plid] = NULL; |
| 1437 | } |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1438 | } |
| 1439 | } |
| 1440 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1441 | err_unlock: |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1442 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1443 | mutex_unlock(&blkcg_pol_register_mutex); |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1444 | return ret; |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1445 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1446 | EXPORT_SYMBOL_GPL(blkcg_policy_register); |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1447 | |
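/*
 * Minimal registration sketch (hypothetical policy, illustration only):
 * the alloc/free callbacks below are exactly the pairs the sanity check
 * in blkcg_policy_register() above insists on; per-blkg data embeds a
 * struct blkg_policy_data and is recovered with container_of().
 */
struct example_pd {
        struct blkg_policy_data pd;
        u64 nr_ios;             /* whatever the policy tracks */
};

static struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
                struct request_queue *q, struct blkcg *blkcg)
{
        struct example_pd *epd = kzalloc_node(sizeof(*epd), gfp, q->node);

        return epd ? &epd->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
        kfree(container_of(pd, struct example_pd, pd));
}

static struct blkcg_policy example_blkcg_policy = {
        .pd_alloc_fn    = example_pd_alloc,
        .pd_free_fn     = example_pd_free,
};

/* from the policy's module or subsys init, error handling elided: */
/*      blkcg_policy_register(&example_blkcg_policy);   */
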
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1448 | /** |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1449 | * blkcg_policy_unregister - unregister a blkcg policy |
| 1450 | * @pol: blkcg policy to unregister |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1451 | * |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1452 | * Undo blkcg_policy_register(@pol). Might sleep. |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1453 | */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1454 | void blkcg_policy_unregister(struct blkcg_policy *pol) |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1455 | { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1456 | struct blkcg *blkcg; |
| 1457 | |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1458 | mutex_lock(&blkcg_pol_register_mutex); |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1459 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1460 | if (WARN_ON(blkcg_policy[pol->plid] != pol)) |
Tejun Heo | 8bd435b | 2012-04-13 13:11:28 -0700 | [diff] [blame] | 1461 | goto out_unlock; |
| 1462 | |
| 1463 | /* kill the intf files first */ |
Tejun Heo | 2ee867dc | 2015-08-18 14:55:34 -0700 | [diff] [blame] | 1464 | if (pol->dfl_cftypes) |
| 1465 | cgroup_rm_cftypes(pol->dfl_cftypes); |
Tejun Heo | 880f50e | 2015-08-18 14:55:30 -0700 | [diff] [blame] | 1466 | if (pol->legacy_cftypes) |
| 1467 | cgroup_rm_cftypes(pol->legacy_cftypes); |
Tejun Heo | 44ea53d | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1468 | |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1469 | /* remove cpds and unregister */ |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1470 | mutex_lock(&blkcg_pol_mutex); |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1471 | |
weiping zhang | 58a9edc | 2017-10-10 22:53:46 +0800 | [diff] [blame] | 1472 | if (pol->cpd_free_fn) { |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1473 | list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) { |
Tejun Heo | e4a9bde | 2015-08-18 14:55:16 -0700 | [diff] [blame] | 1474 | if (blkcg->cpd[pol->plid]) { |
| 1475 | pol->cpd_free_fn(blkcg->cpd[pol->plid]); |
| 1476 | blkcg->cpd[pol->plid] = NULL; |
| 1477 | } |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1478 | } |
| 1479 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1480 | blkcg_policy[pol->plid] = NULL; |
Tejun Heo | 06b285b | 2015-07-09 16:39:50 -0400 | [diff] [blame] | 1481 | |
Tejun Heo | bc0d650 | 2012-04-13 13:11:26 -0700 | [diff] [blame] | 1482 | mutex_unlock(&blkcg_pol_mutex); |
Tejun Heo | 838f13b | 2015-07-09 16:39:47 -0400 | [diff] [blame] | 1483 | out_unlock: |
| 1484 | mutex_unlock(&blkcg_pol_register_mutex); |
Vivek Goyal | 3e25206 | 2009-12-04 10:36:42 -0500 | [diff] [blame] | 1485 | } |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1486 | EXPORT_SYMBOL_GPL(blkcg_policy_unregister); |
Josef Bacik | 903d23f | 2018-07-03 11:14:52 -0400 | [diff] [blame] | 1487 | |
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 1488 | bool __blkcg_punt_bio_submit(struct bio *bio) |
| 1489 | { |
| 1490 | struct blkcg_gq *blkg = bio->bi_blkg; |
| 1491 | |
| 1492 | /* consume the flag first */ |
| 1493 | bio->bi_opf &= ~REQ_CGROUP_PUNT; |
| 1494 | |
| 1495 | /* never bounce for the root cgroup */ |
| 1496 | if (!blkg->parent) |
| 1497 | return false; |
| 1498 | |
| 1499 | spin_lock_bh(&blkg->async_bio_lock); |
| 1500 | bio_list_add(&blkg->async_bios, bio); |
| 1501 | spin_unlock_bh(&blkg->async_bio_lock); |
| 1502 | |
| 1503 | queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work); |
| 1504 | return true; |
| 1505 | } |
| 1506 | |
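/*
 * Sketch of the consuming side of the punt above; the real consumer is
 * the blkg's async_bio_work item (cf. blkg_async_bio_workfn() earlier in
 * this file). Shown here in simplified form: splice the punted bios out
 * under the lock, then submit them from the workqueue's process context,
 * where the submitter's cgroup no longer matters.
 */
static void example_async_bio_workfn(struct work_struct *work)
{
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             async_bio_work);
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio *bio;

        spin_lock_bh(&blkg->async_bio_lock);
        bio_list_merge(&bios, &blkg->async_bios);
        bio_list_init(&blkg->async_bios);
        spin_unlock_bh(&blkg->async_bio_lock);

        while ((bio = bio_list_pop(&bios)))
                submit_bio(bio);
}
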
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1507 | /* |
| 1508 | * Scale the accumulated delay based on how long it has been since we updated |
| 1509 | * the delay. We call this in two places: when adding delay, in case it's been |
| 1510 | * a while since we last added any, and when checking whether we need to |
| 1511 | * delay a task, to account for any delays that may have occurred since. |
| 1512 | */ |
| 1513 | static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) |
| 1514 | { |
| 1515 | u64 old = atomic64_read(&blkg->delay_start); |
| 1516 | |
| 1517 | /* |
| 1518 | * We only want to scale down every second. The idea here is that we |
| 1519 | * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain |
| 1520 | * time window. We only want to throttle tasks for recent delay that |
| 1521 | * has occurred, in 1 second time windows since that's the maximum amount |
| 1522 | * of time things can be throttled for. We save the current delay window in |
| 1523 | * blkg->last_delay so we know what amount is still left to be charged |
| 1524 | * to the blkg from this point onward. blkg->last_use keeps track of |
| 1525 | * the use_delay counter. The idea is if we're unthrottling the blkg we |
| 1526 | * are ok with whatever is happening now, and we can take away more of |
| 1527 | * the accumulated delay as we've already throttled enough that |
| 1528 | * everybody is happy with their IO latencies. |
| 1529 | */ |
| 1530 | if (time_before64(old + NSEC_PER_SEC, now) && |
| 1531 | atomic64_cmpxchg(&blkg->delay_start, old, now) == old) { |
| 1532 | u64 cur = atomic64_read(&blkg->delay_nsec); |
| 1533 | u64 sub = min_t(u64, blkg->last_delay, now - old); |
| 1534 | int cur_use = atomic_read(&blkg->use_delay); |
| 1535 | |
| 1536 | /* |
| 1537 | * We've been unthrottled, subtract a larger chunk of our |
| 1538 | * accumulated delay. |
| 1539 | */ |
| 1540 | if (cur_use < blkg->last_use) |
| 1541 | sub = max_t(u64, sub, blkg->last_delay >> 1); |
| 1542 | |
| 1543 | /* |
| 1544 | * This shouldn't happen, but handle it anyway. Our delay_nsec |
| 1545 | * should only ever be growing except here where we subtract out |
| 1546 | * min(last_delay, 1 second), but lord knows bugs happen and I'd |
| 1547 | * rather not end up with negative numbers. |
| 1548 | */ |
| 1549 | if (unlikely(cur < sub)) { |
| 1550 | atomic64_set(&blkg->delay_nsec, 0); |
| 1551 | blkg->last_delay = 0; |
| 1552 | } else { |
| 1553 | atomic64_sub(sub, &blkg->delay_nsec); |
| 1554 | blkg->last_delay = cur - sub; |
| 1555 | } |
| 1556 | blkg->last_use = cur_use; |
| 1557 | } |
| 1558 | } |
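
/*
 * Worked example with illustrative numbers: suppose delay_nsec (cur) is
 * 5s, last_delay is 3s, and 1.2s have passed since delay_start was last
 * reset, so the 1 second window has expired. Then sub = min(3s, 1.2s) =
 * 1.2s; if use_delay also dropped since last_use we were unthrottled,
 * raising sub to max(1.2s, 3s / 2) = 1.5s. That leaves delay_nsec =
 * 5s - 1.5s = 3.5s and last_delay = 3.5s for the new window.
 */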
| 1559 | |
| 1560 | /* |
| 1561 | * This is called when we want to actually walk up the hierarchy and check to |
| 1562 | * see if we need to throttle, and then actually throttle if there is some |
| 1563 | * accumulated delay. This should only be called upon return to user space so |
| 1564 | * we're not holding some lock that would induce a priority inversion. |
| 1565 | */ |
| 1566 | static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) |
| 1567 | { |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1568 | unsigned long pflags; |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1569 | u64 now = ktime_to_ns(ktime_get()); |
| 1570 | u64 exp; |
| 1571 | u64 delay_nsec = 0; |
| 1572 | int tok; |
| 1573 | |
| 1574 | while (blkg->parent) { |
| 1575 | if (atomic_read(&blkg->use_delay)) { |
| 1576 | blkcg_scale_delay(blkg, now); |
| 1577 | delay_nsec = max_t(u64, delay_nsec, |
| 1578 | atomic64_read(&blkg->delay_nsec)); |
| 1579 | } |
| 1580 | blkg = blkg->parent; |
| 1581 | } |
| 1582 | |
| 1583 | if (!delay_nsec) |
| 1584 | return; |
| 1585 | |
| 1586 | /* |
| 1587 | * Let's not sleep for all eternity if we've amassed a huge delay. |
| 1588 | * Swapping or metadata IO can accumulate 10's of seconds worth of |
| 1589 | * delay, and we want userspace to be able to do _something_ so cap the |
| 1590 | * delays at 250ms (matching the min_t() below). If there's 10's of seconds |
| 1591 | * worth of delay then the tasks will be delayed for 250ms for every syscall. |
| 1592 | */ |
| 1593 | delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC); |
| 1594 | |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1595 | if (use_memdelay) |
| 1596 | psi_memstall_enter(&pflags); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1597 | |
| 1598 | exp = ktime_add_ns(now, delay_nsec); |
| 1599 | tok = io_schedule_prepare(); |
| 1600 | do { |
| 1601 | __set_current_state(TASK_KILLABLE); |
| 1602 | if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS)) |
| 1603 | break; |
| 1604 | } while (!fatal_signal_pending(current)); |
| 1605 | io_schedule_finish(tok); |
Josef Bacik | fd112c7 | 2019-07-09 14:41:29 -0700 | [diff] [blame] | 1606 | |
| 1607 | if (use_memdelay) |
| 1608 | psi_memstall_leave(&pflags); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1609 | } |
| 1610 | |
| 1611 | /** |
| 1612 | * blkcg_maybe_throttle_current - throttle the current task if it has been marked |
| 1613 | * |
| 1614 | * This is only called if we've been marked with set_notify_resume(). Obviously |
| 1615 | * we can be set_notify_resume() for reasons other than blkcg throttling, so we |
| 1616 | * check to see if current->throttle_queue is set and if not this doesn't do |
| 1617 | * anything. This should only ever be called by the resume code; it's not meant |
| 1618 | * to be called willy-nilly, as it will actually do the work to throttle the |
| 1619 | * task if it is set up for throttling. |
| 1620 | */ |
| 1621 | void blkcg_maybe_throttle_current(void) |
| 1622 | { |
| 1623 | struct request_queue *q = current->throttle_queue; |
| 1624 | struct cgroup_subsys_state *css; |
| 1625 | struct blkcg *blkcg; |
| 1626 | struct blkcg_gq *blkg; |
| 1627 | bool use_memdelay = current->use_memdelay; |
| 1628 | |
| 1629 | if (!q) |
| 1630 | return; |
| 1631 | |
| 1632 | current->throttle_queue = NULL; |
| 1633 | current->use_memdelay = false; |
| 1634 | |
| 1635 | rcu_read_lock(); |
| 1636 | css = kthread_blkcg(); |
| 1637 | if (css) |
| 1638 | blkcg = css_to_blkcg(css); |
| 1639 | else |
| 1640 | blkcg = css_to_blkcg(task_css(current, io_cgrp_id)); |
| 1641 | |
| 1642 | if (!blkcg) |
| 1643 | goto out; |
| 1644 | blkg = blkg_lookup(blkcg, q); |
| 1645 | if (!blkg) |
| 1646 | goto out; |
Dennis Zhou | 7754f66 | 2018-12-05 12:10:39 -0500 | [diff] [blame] | 1647 | if (!blkg_tryget(blkg)) |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1648 | goto out; |
| 1649 | rcu_read_unlock(); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1650 | |
| 1651 | blkcg_maybe_throttle_blkg(blkg, use_memdelay); |
| 1652 | blkg_put(blkg); |
Josef Bacik | cc7ecc25 | 2018-07-31 12:39:03 -0400 | [diff] [blame] | 1653 | blk_put_queue(q); |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1654 | return; |
| 1655 | out: |
| 1656 | rcu_read_unlock(); |
| 1657 | blk_put_queue(q); |
| 1658 | } |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1659 | |
| 1660 | /** |
| 1661 | * blkcg_schedule_throttle - this task needs to check for throttling |
Bart Van Assche | 537d71b | 2019-03-20 13:18:45 -0700 | [diff] [blame] | 1662 | * @q: the request queue IO was submitted on |
| 1663 | * @use_memdelay: do we charge this to memory delay for PSI |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1664 | * |
| 1665 | * This is called by the IO controller when we know there's delay accumulated |
| 1666 | * for the blkg for this task. We do not pass the blkg because there are places |
| 1667 | * we call this that may not have that information, the swapping code for |
| 1668 | * instance will only have a request_queue at that point. This sets the |
| 1669 | * notify_resume for the task to check and see if it requires throttling before |
| 1670 | * returning to user space. |
| 1671 | * |
| 1672 | * We will only schedule once per syscall. You can call this over and over |
| 1673 | * again and it will only do the check once upon return to user space, and only |
| 1674 | * throttle once. If the task needs to be throttled again it'll need to be |
| 1675 | * re-set the next time we see the task. |
| 1676 | */ |
| 1677 | void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) |
| 1678 | { |
| 1679 | if (unlikely(current->flags & PF_KTHREAD)) |
| 1680 | return; |
| 1681 | |
| 1682 | if (!blk_get_queue(q)) |
| 1683 | return; |
| 1684 | |
| 1685 | if (current->throttle_queue) |
| 1686 | blk_put_queue(current->throttle_queue); |
| 1687 | current->throttle_queue = q; |
| 1688 | if (use_memdelay) |
| 1689 | current->use_memdelay = use_memdelay; |
| 1690 | set_notify_resume(current); |
| 1691 | } |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1692 | |
| 1693 | /** |
| 1694 | * blkcg_add_delay - add delay to this blkg |
Bart Van Assche | 537d71b | 2019-03-20 13:18:45 -0700 | [diff] [blame] | 1695 | * @blkg: blkg of interest |
| 1696 | * @now: the current time in nanoseconds |
| 1697 | * @delta: how many nanoseconds of delay to add |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1698 | * |
| 1699 | * Charge @delta to the blkg's current delay accumulation. This is used to |
| 1700 | * throttle tasks if an IO controller thinks we need more throttling. |
| 1701 | */ |
| 1702 | void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) |
| 1703 | { |
| 1704 | blkcg_scale_delay(blkg, now); |
| 1705 | atomic64_add(delta, &blkg->delay_nsec); |
| 1706 | } |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1707 | |
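/*
 * Usage sketch (hypothetical controller code): roughly how an IO
 * controller strings the two helpers above together once it decides a
 * group has exceeded its target: charge the overage to the blkg, then
 * arm the return-to-userspace throttle for the current task.
 */
static void example_charge_and_throttle(struct blkcg_gq *blkg,
                                        struct request_queue *q,
                                        u64 now, u64 overage_ns)
{
        blkcg_add_delay(blkg, now, overage_ns);
        blkcg_schedule_throttle(q, false);      /* true when charging memstall */
}
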
Tejun Heo | d3f77df | 2019-06-27 13:39:52 -0700 | [diff] [blame] | 1708 | static int __init blkcg_init(void) |
| 1709 | { |
| 1710 | blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", |
| 1711 | WQ_MEM_RECLAIM | WQ_FREEZABLE | |
| 1712 | WQ_UNBOUND | WQ_SYSFS, 0); |
| 1713 | if (!blkcg_punt_bio_wq) |
| 1714 | return -ENOMEM; |
| 1715 | return 0; |
| 1716 | } |
| 1717 | subsys_initcall(blkcg_init); |
| 1718 | |
Josef Bacik | 903d23f | 2018-07-03 11:14:52 -0400 | [diff] [blame] | 1719 | module_param(blkcg_debug_stats, bool, 0644); |
| 1720 | MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not"); |