/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_MQ_DEADLINE_CGROUP_H_)
#define _MQ_DEADLINE_CGROUP_H_

#include <linux/blk-cgroup.h>

struct request_queue;
| 9 | |
/**
 * struct io_stats_per_prio - I/O statistics per I/O priority class.
 * @inserted: Number of inserted requests.
 * @merged: Number of merged requests.
 * @dispatched: Number of dispatched requests.
 * @completed: Number of I/O completions.
 *
 * Counters are local_t because they are only ever updated via local_inc()
 * from ddcg_count(), which runs with preemption disabled on the owning CPU
 * (get_cpu_ptr()); readers (ddcg_sum()) tolerate slightly stale values.
 */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};
| 23 | |
/*
 * I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*).
 * The array is indexed directly by the priority class value; the four slots
 * presumably correspond to IOPRIO_CLASS_NONE/RT/BE/IDLE — confirm against
 * <linux/ioprio.h> if new classes are ever added.
 */
struct blkcg_io_stats {
	struct io_stats_per_prio stats[4];
};
| 28 | |
/**
 * struct dd_blkcg - Per cgroup data.
 * @cpd: blkcg_policy_data structure.
 * @stats: I/O statistics, allocated per CPU; updated by ddcg_count() and
 *	summed by ddcg_sum().
 */
struct dd_blkcg {
	/*
	 * Must be the first member — presumably so a struct blkcg_policy_data
	 * pointer handed out by the blkcg core can be converted back to a
	 * struct dd_blkcg with a simple cast/container_of; verify against the
	 * corresponding .c file.
	 */
	struct blkcg_policy_data cpd; /* must be the first member */
	struct blkcg_io_stats __percpu *stats;
};
| 38 | |
/*
 * Count one event of type 'event_type' and with I/O priority class
 * 'prio_class'.
 *
 * get_cpu_ptr() disables preemption, so the local_inc() is guaranteed to
 * hit this CPU's counter; put_cpu_ptr() re-enables preemption.  The two
 * BUILD_BUG_ON()s are compile-time-only type checks on the macro arguments
 * (ddcg must be a struct dd_blkcg * and prio_class a u8) and generate no
 * code.  The whole body is wrapped in do { } while (0) so the macro behaves
 * as a single statement.
 */
#define ddcg_count(ddcg, event_type, prio_class) do {			\
	if (ddcg) {							\
		struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \
									\
		BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));	\
		BUILD_BUG_ON(!__same_type((prio_class), u8));		\
		local_inc(&io_stats->stats[(prio_class)].event_type);	\
		put_cpu_ptr(io_stats);					\
	}								\
} while (0)
| 53 | |
/*
 * Returns the total number of ddcg_count(ddcg, event_type, prio) calls
 * across all CPUs. No locking or barriers since it is fine if the returned
 * sum is slightly outdated.
 *
 * The macro-local variables are prefixed with '__' so they cannot shadow —
 * and thereby silently capture — identifiers named 'cpu' or 'sum' that may
 * appear in the caller's macro argument expressions (standard kernel macro
 * hygiene).  The BUILD_BUG_ON()s are compile-time type checks only.
 *
 * NOTE(review): iterates present CPUs; events counted on a CPU that is
 * later physically removed would be lost — confirm for_each_present_cpu
 * vs for_each_possible_cpu is intentional.  The u32 accumulator also
 * truncates relative to local_t — presumably acceptable for statistics.
 */
#define ddcg_sum(ddcg, event_type, prio) ({				\
	unsigned int __cpu;						\
	u32 __sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));		\
	BUILD_BUG_ON(!__same_type((prio), u8));				\
	for_each_present_cpu(__cpu)					\
		__sum += local_read(&per_cpu_ptr((ddcg)->stats, __cpu)-> \
				    stats[(prio)].event_type);		\
	__sum;								\
})
| 70 | |
#ifdef CONFIG_BLK_CGROUP

/**
 * struct dd_blkg - Per (cgroup, request queue) data.
 * @pd: blkg_policy_data structure.
 */
struct dd_blkg {
	struct blkg_policy_data pd; /* must be the first member */
};

/* Resolve the dd_blkcg owning @bio's cgroup. */
struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
/* Enable the deadline blkcg policy on @q; returns 0 or a negative errno
 * (NOTE(review): error convention inferred from the stubs below — confirm
 * against the .c implementation). */
int dd_activate_policy(struct request_queue *q);
/* Disable the deadline blkcg policy on @q. */
void dd_deactivate_policy(struct request_queue *q);
/* Module init/exit hooks for the blkcg policy registration. */
int __init dd_blkcg_init(void);
void __exit dd_blkcg_exit(void);
| 86 | |
#else /* CONFIG_BLK_CGROUP */

/* Without cgroup support there is no blkcg to resolve; always NULL. */
static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
{
	return NULL;
}
| 93 | |
/* No-op stub when cgroup support is compiled out; reports success. */
static inline int dd_activate_policy(struct request_queue *q)
{
	return 0;
}
| 98 | |
/* No-op stub when cgroup support is compiled out. */
static inline void dd_deactivate_policy(struct request_queue *q)
{
}
| 102 | |
/* No-op stub when cgroup support is compiled out; reports success. */
static inline int dd_blkcg_init(void)
{
	return 0;
}
| 107 | |
/* No-op stub when cgroup support is compiled out. */
static inline void dd_blkcg_exit(void)
{
}
| 111 | |
#endif /* CONFIG_BLK_CGROUP */

#endif /* _MQ_DEADLINE_CGROUP_H_ */