/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
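
/*
 * Illustrative example: entries in the blkio_files[] table below encode
 * both halves into ->private, e.g.
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device),
 *
 * and the read/write handlers recover them again with
 * BLKIOFILE_POLICY(cft->private) and BLKIOFILE_ATTR(cft->private).
 */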

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

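/* Return the blkio_cgroup embedding @cgroup's blkio subsystem state. */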
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

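/* As above, but resolve the blkio_cgroup starting from a task. */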
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

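/*
 * The three helpers below propagate a configuration change to whichever
 * policy (proportional weight or throttling) owns the group: they walk
 * blkio_list and invoke the owning policy's matching update callback, if
 * one is registered.
 */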
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

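/*
 * Idle-time accounting: the function below marks the group as idling and
 * records the start timestamp; blkiocg_update_idle_time_stats() further
 * down folds the elapsed interval into stats->idle_time and clears the
 * flag again.
 */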
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

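/*
 * Queue-depth sampling: each call adds the number of currently queued
 * READ and WRITE requests to avg_queue_size_sum and bumps the sample
 * count; blkio_get_stat() later reports sum/samples as avg_queue_size.
 */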
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if cfqq got a
	 * new request in the parent group and moved to this group while it
	 * was being added to the service tree. Just ignore the event and
	 * move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

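/*
 * Request timeline used by the completion accounting below (illustrative):
 *
 *	start_time          io_start_time           now
 *	----|---------------------|------------------|----
 *	    queued                dispatched         completed
 *
 * wait_time accumulates io_start_time - start_time, service_time
 * accumulates now - io_start_time.
 */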
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

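/*
 * Look up the blkg for the @blkcg - @q pair under policy @plid, creating
 * it if it doesn't exist yet.  Called with queue_lock held inside
 * rcu_read_lock(); the allocation path temporarily drops both (see the
 * FIXME below), so callers must cope with ERR_PTR() returns and with the
 * group having been created by someone else in the meantime.
 */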
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q, plid);
	if (blkg)
		return blkg;

	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
	if (new_blkg) {
		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

		spin_lock_init(&new_blkg->stats_lock);
		rcu_assign_pointer(new_blkg->q, q);
		new_blkg->blkcg_id = css_id(&blkcg->css);
		new_blkg->plid = plid;
		cgroup_path(blkcg->css.cgroup, new_blkg->path,
			    sizeof(new_blkg->path));
	}

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	css_put(&blkcg->css);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q, plid);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	pol->ops.blkio_link_group_fn(q, blkg);
	spin_unlock(&blkcg->lock);
out:
	if (new_blkg) {
		free_percpu(new_blkg->stats_cpu);
		kfree(new_blkg);
	}
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

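/*
 * Ask every registered policy to release @q's groups, retrying until all
 * policies report an empty list; a pass can come up short if it races
 * cgroup removal (see the comment inside the loop).
 */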
void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

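/*
 * Build the key passed to cb->fill().  Keys are "<dname> <sub-type>",
 * e.g. "8:16 Read" or "8:16 Total" for a disk whose bdi device is named
 * 8:16 (the name format here is illustrative); with @diskname_only set,
 * only the device name is emitted.
 */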
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

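/*
 * Sum one per-cpu counter over all possible CPUs.  The
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() loop rereads a CPU's
 * value until it observes a consistent 64-bit snapshot, which matters on
 * 32-bit where the writer cannot update the counter atomically.
 */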
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

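/*
 * Parse a "<major>:<minor> <value>" rule and apply it to the matching
 * group's per-device configuration, e.g. (illustrative)
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads on device 8:16 at 1MB/s for this cgroup.
 */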
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		blkg->conf.weight = temp;
		blkio_update_group_weight(blkg, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			blkg->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			blkg->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			blkg->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			blkg->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	const char *dname = dev_name(blkg->q->backing_dev_info.dev);
	int fileid = BLKIOFILE_ATTR(cft->private);
	int rw = WRITE;

	switch (blkg->plid) {
	case BLKIO_POLICY_PROP:
		if (blkg->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, blkg->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (blkg->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, blkg->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (blkg->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, blkg->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
			blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = dev_name(blkg->q->backing_dev_info.dev);

		if (BLKIOFILE_POLICY(cft->private) != blkg->plid)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
							   type);
		else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->plid == plid && !blkg->conf.weight)
			blkio_update_group_weight(blkg, blkcg->weight);

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

Vivek Goyal31e4c282009-12-03 12:59:42 -05001215struct cftype blkio_files[] = {
1216 {
Gui Jianfeng34d0f172010-04-13 16:05:49 +08001217 .name = "weight_device",
Vivek Goyal062a6442010-09-15 17:06:33 -04001218 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1219 BLKIO_PROP_weight_device),
1220 .read_seq_string = blkiocg_file_read,
1221 .write_string = blkiocg_file_write,
Gui Jianfeng34d0f172010-04-13 16:05:49 +08001222 .max_write_len = 256,
1223 },
1224 {
Vivek Goyal31e4c282009-12-03 12:59:42 -05001225 .name = "weight",
Vivek Goyal062a6442010-09-15 17:06:33 -04001226 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1227 BLKIO_PROP_weight),
1228 .read_u64 = blkiocg_file_read_u64,
1229 .write_u64 = blkiocg_file_write_u64,
Vivek Goyal31e4c282009-12-03 12:59:42 -05001230 },
Vivek Goyal22084192009-12-03 12:59:49 -05001231 {
1232 .name = "time",
Vivek Goyal13f98252010-10-01 14:49:41 +02001233 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1234 BLKIO_PROP_time),
1235 .read_map = blkiocg_file_read_map,
Vivek Goyal22084192009-12-03 12:59:49 -05001236 },
1237 {
1238 .name = "sectors",
Vivek Goyal13f98252010-10-01 14:49:41 +02001239 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1240 BLKIO_PROP_sectors),
1241 .read_map = blkiocg_file_read_map,
Divyesh Shah303a3ac2010-04-01 15:01:24 -07001242 },
1243 {
1244 .name = "io_service_bytes",
Vivek Goyal13f98252010-10-01 14:49:41 +02001245 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1246 BLKIO_PROP_io_service_bytes),
1247 .read_map = blkiocg_file_read_map,
Divyesh Shah303a3ac2010-04-01 15:01:24 -07001248 },
1249 {
1250 .name = "io_serviced",
Vivek Goyal13f98252010-10-01 14:49:41 +02001251 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1252 BLKIO_PROP_io_serviced),
1253 .read_map = blkiocg_file_read_map,
Divyesh Shah303a3ac2010-04-01 15:01:24 -07001254 },
1255 {
1256 .name = "io_service_time",
Vivek Goyal13f98252010-10-01 14:49:41 +02001257 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1258 BLKIO_PROP_io_service_time),
1259 .read_map = blkiocg_file_read_map,
Divyesh Shah303a3ac2010-04-01 15:01:24 -07001260 },
1261 {
1262 .name = "io_wait_time",
Vivek Goyal13f98252010-10-01 14:49:41 +02001263 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1264 BLKIO_PROP_io_wait_time),
1265 .read_map = blkiocg_file_read_map,
Divyesh Shah84c124d2010-04-09 08:31:19 +02001266 },
1267 {
Divyesh Shah812d4022010-04-08 21:14:23 -07001268 .name = "io_merged",
Vivek Goyal13f98252010-10-01 14:49:41 +02001269 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1270 BLKIO_PROP_io_merged),
1271 .read_map = blkiocg_file_read_map,
Divyesh Shah812d4022010-04-08 21:14:23 -07001272 },
1273 {
Divyesh Shahcdc11842010-04-08 21:15:10 -07001274 .name = "io_queued",
Vivek Goyal13f98252010-10-01 14:49:41 +02001275 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1276 BLKIO_PROP_io_queued),
1277 .read_map = blkiocg_file_read_map,
Divyesh Shahcdc11842010-04-08 21:15:10 -07001278 },
1279 {
Divyesh Shah84c124d2010-04-09 08:31:19 +02001280 .name = "reset_stats",
1281 .write_u64 = blkiocg_reset_stats,
Vivek Goyal22084192009-12-03 12:59:49 -05001282 },
Vivek Goyal13f98252010-10-01 14:49:41 +02001283#ifdef CONFIG_BLK_DEV_THROTTLING
1284 {
Vivek Goyal4c9eefa2010-09-15 17:06:34 -04001285 .name = "throttle.read_bps_device",
1286 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1287 BLKIO_THROTL_read_bps_device),
1288 .read_seq_string = blkiocg_file_read,
1289 .write_string = blkiocg_file_write,
1290 .max_write_len = 256,
1291 },
1292
1293 {
1294 .name = "throttle.write_bps_device",
1295 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1296 BLKIO_THROTL_write_bps_device),
1297 .read_seq_string = blkiocg_file_read,
1298 .write_string = blkiocg_file_write,
1299 .max_write_len = 256,
1300 },
Vivek Goyal7702e8f2010-09-15 17:06:36 -04001301
1302 {
1303 .name = "throttle.read_iops_device",
1304 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1305 BLKIO_THROTL_read_iops_device),
1306 .read_seq_string = blkiocg_file_read,
1307 .write_string = blkiocg_file_write,
1308 .max_write_len = 256,
1309 },
1310
1311 {
1312 .name = "throttle.write_iops_device",
1313 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1314 BLKIO_THROTL_write_iops_device),
1315 .read_seq_string = blkiocg_file_read,
1316 .write_string = blkiocg_file_write,
1317 .max_write_len = 256,
1318 },
Vivek Goyal4c9eefa2010-09-15 17:06:34 -04001319 {
Vivek Goyal4c9eefa2010-09-15 17:06:34 -04001320 .name = "throttle.io_service_bytes",
1321 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1322 BLKIO_THROTL_io_service_bytes),
1323 .read_map = blkiocg_file_read_map,
1324 },
1325 {
Vivek Goyal4c9eefa2010-09-15 17:06:34 -04001326 .name = "throttle.io_serviced",
1327 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1328 BLKIO_THROTL_io_serviced),
1329 .read_map = blkiocg_file_read_map,
1330 },
Vivek Goyal13f98252010-10-01 14:49:41 +02001331#endif /* CONFIG_BLK_DEV_THROTTLING */
1332
Vivek Goyal22084192009-12-03 12:59:49 -05001333#ifdef CONFIG_DEBUG_BLK_CGROUP
Divyesh Shahcdc11842010-04-08 21:15:10 -07001334 {
1335 .name = "avg_queue_size",
Vivek Goyal062a6442010-09-15 17:06:33 -04001336 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1337 BLKIO_PROP_avg_queue_size),
1338 .read_map = blkiocg_file_read_map,
Divyesh Shahcdc11842010-04-08 21:15:10 -07001339 },
1340 {
Divyesh Shah812df482010-04-08 21:15:35 -07001341 .name = "group_wait_time",
Vivek Goyal062a6442010-09-15 17:06:33 -04001342 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1343 BLKIO_PROP_group_wait_time),
1344 .read_map = blkiocg_file_read_map,
Divyesh Shah812df482010-04-08 21:15:35 -07001345 },
1346 {
1347 .name = "idle_time",
Vivek Goyal062a6442010-09-15 17:06:33 -04001348 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1349 BLKIO_PROP_idle_time),
1350 .read_map = blkiocg_file_read_map,
Divyesh Shah812df482010-04-08 21:15:35 -07001351 },
1352 {
1353 .name = "empty_time",
Vivek Goyal062a6442010-09-15 17:06:33 -04001354 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1355 BLKIO_PROP_empty_time),
1356 .read_map = blkiocg_file_read_map,
Divyesh Shah812df482010-04-08 21:15:35 -07001357 },
1358 {
Vivek Goyal22084192009-12-03 12:59:49 -05001359 .name = "dequeue",
Vivek Goyal062a6442010-09-15 17:06:33 -04001360 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1361 BLKIO_PROP_dequeue),
1362 .read_map = blkiocg_file_read_map,
Divyesh Shahcdc11842010-04-08 21:15:10 -07001363 },
Justin TerAvest9026e522011-03-22 21:26:54 +01001364 {
1365 .name = "unaccounted_time",
1366 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1367 BLKIO_PROP_unaccounted_time),
1368 .read_map = blkiocg_file_read_map,
1369 },
Vivek Goyal22084192009-12-03 12:59:49 -05001370#endif
Vivek Goyal31e4c282009-12-03 12:59:42 -05001371};
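
/*
 * Example usage from userspace (illustrative only; the mount point and
 * the 8:16 device numbers below are assumptions, not dictated by this
 * file):
 *
 *	# limit reads from device 8:16 to 1MB/s for this group
 *	echo "8:16 1048576" > /cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 *	# read back the per-device request counts
 *	cat /cgroup/blkio/grp1/blkio.io_serviced
 */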

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

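/*
 * cgroup removal path: unlink every blkio_group hanging off this blkcg
 * and let each registered policy know.  (Summary comment for the loop
 * below.)  blkcg->lock cannot be held across the policy callbacks, so
 * the group list is re-checked from the top on each iteration after the
 * lock is retaken.
 */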
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as the associated
		 * cgroup is going away. Let all the IO controlling policies
		 * know about this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
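
/*
 * Registration sketch (illustrative only -- "foo" and its callback are
 * hypothetical, not part of this file).  A policy fills in a
 * blkio_policy_type with its plid and callbacks, then registers it,
 * typically from its module init path:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 */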

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
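
/*
 * Matching teardown for the sketch above (again hypothetical): a policy
 * module calls blkio_policy_unregister(&blkio_policy_foo) from its
 * module exit path, after which it is dropped from blkio_list and its
 * callbacks are no longer invoked.
 */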