/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
 * recursive ones.  It is used to carry stats of dead children and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
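
/*
 * Illustrative sketch (not from the original header): a policy typically
 * wraps blkg_policy_data in its own per-blkg structure and recovers it
 * with container_of().  The names "my_blkg_data" and "my_weight" below
 * are hypothetical:
 *
 *	struct my_blkg_data {
 *		struct blkg_policy_data pd;	- must be the first member
 *		unsigned int my_weight;
 *	};
 *
 *	static struct my_blkg_data *pd_to_my(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct my_blkg_data, pd) : NULL;
 *	}
 */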

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
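
/*
 * Illustrative sketch (not from the original header): a policy's cgroup
 * file write handler usually brackets its update with blkg_conf_prep() /
 * blkg_conf_finish().  "my_policy" and the parsing step are hypothetical:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &my_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = my_parse_and_apply(ctx.body, blkg_to_pd(ctx.blkg, &my_policy));
 *	blkg_conf_finish(&ctx);
 *	return ret;
 */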

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * blkcg_get_css - find and get a reference to the css
 *
 * Find the css associated with either the kthread or the current task.
 * This takes a reference on the blkcg which will need to be managed by the
 * caller.
 */
static inline struct cgroup_subsys_state *blkcg_get_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	css = kthread_blkcg();
	if (css) {
		css_get(css);
	} else {
		/*
		 * This is a bit complicated.  It is possible task_css() is
		 * seeing an old css pointer here.  This is caused by the
		 * current thread migrating away from this cgroup and this
		 * cgroup dying.  css_tryget() will fail when trying to take
		 * a ref on a cgroup whose ref count has hit 0.
		 *
		 * Therefore, if it does fail, this means current must have
		 * been swapped away already and this is waiting for it to
		 * propagate on the polling cpu.  Hence the use of
		 * cpu_relax().
		 */
		while (true) {
			css = task_css(current, io_cgrp_id);
			if (likely(css_tryget(css)))
				break;
			cpu_relax();
		}
	}

	rcu_read_unlock();

	return css;
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal version of bio_blkcg() for bfq and cfq
 *
 * DO NOT USE.
 * There is a flaw in using this version of the function.  In particular,
 * it was used in a broken paradigm where association was called on the
 * given css.  It is possible, though, that the css returned by task_css()
 * is in the process of dying due to migration of the current task.  So it
 * is improper to assume *_get() is going to succeed.  Both BFQ and CFQ
 * rely on this logic, and it will take additional work to handle it more
 * gracefully.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know that association
 * has been done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return NULL;
}
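
/*
 * Illustrative sketch (not from the original header): callers of
 * bio_blkcg() that may see an unassociated bio handle the %NULL case
 * explicitly, e.g. under the RCU read lock:
 *
 *	struct blkcg *blkcg = bio_blkcg(bio);
 *
 *	if (!blkcg)
 *		blkcg = css_to_blkcg(blkcg_css());
 */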

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio
 * as if it were attached to the root blkg, and then backcharge to the
 * actual owning blkg.  The idea is we do bio_blkcg() to look up the actual
 * context for the bio and attach the appropriate blkg to the bio.  Then we
 * call this helper and, if it is true, run with the root blkg for that
 * queue and then do any backcharging to the originating cgroup once the io
 * is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wb's have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
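
/*
 * Illustrative sketch (not from the original header), e.g. for tracing or
 * debug output; the return-value check assumes cgroup_path()'s convention
 * of a negative value on error:
 *
 *	char path[128];
 *
 *	if (blkg_path(blkg, path, sizeof(path)) >= 0)
 *		pr_debug("blkg path: %s\n", path);
 */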

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
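
/*
 * Illustrative sketch (not from the original header): an RCU lookup whose
 * result must outlive the RCU section pairs blkg_try_get() with
 * blkg_put():
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg = blkg_try_get(blkg);
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		(use blkg)
 *		blkg_put(blkg);
 *	}
 */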

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
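
/*
 * Illustrative sketch (not from the original header): walking @p_blkg and
 * all of its online descendants, e.g. when propagating policy state:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) {
 *		(visit d_blkg; p_blkg itself comes first)
 *	}
 *	rcu_read_unlock();
 */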

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);
	if (blkcg)
		css_get(&blkcg->css);
	else
		blkcg = css_to_blkcg(blkcg_get_css());

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* an additional ref is always taken for rl */
	css_put(&rl->blkg->blkcg->css);
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}
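
/*
 * Illustrative sketch (not from the original header): the legacy request
 * allocation path pairs the two helpers under queue_lock, dropping the
 * reference if allocation fails; the allocator call is hypothetical:
 *
 *	struct request_list *rl;
 *
 *	rl = blk_get_rl(q, bio);		(never returns NULL)
 *	rq = my_alloc_request(rl, op, bio);
 *	if (!rq)
 *		blk_put_rl(rl);
 */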

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}
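
/*
 * Illustrative sketch (not from the original header): the blkg_stat
 * lifecycle as a policy's pd_alloc/free path might use it:
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, 1);			(per-cpu, batched)
 *	total = blkg_stat_read(&st);		(sums all CPUs)
 *	blkg_stat_exit(&st);
 */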

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
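
/*
 * Illustrative sketch (not from the original header): accounting one bio
 * into the byte and IO rwstats, as blkcg_bio_issue_check() below does:
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
 *			bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 */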

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, NULL);
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = __blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}
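
/*
 * Illustrative sketch (not from the original header): the bio submission
 * path is expected to call this before queueing and to give up the bio
 * when the throttler has taken ownership of it:
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;		(bio now owned by blk-throttle)
 */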

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */