#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <trace/events/sched.h>

#include "sched.h"
#include "tune.h"

#ifdef CONFIG_CGROUP_SCHEDTUNE
static bool schedtune_initialized = false;
#endif /* CONFIG_CGROUP_SCHEDTUNE */

unsigned int sysctl_sched_cfs_boost __read_mostly;

extern struct target_nrg schedtune_target_nrg;

/* Performance Boost region (B) threshold params */
static int perf_boost_idx;

/* Performance Constraint region (C) threshold params */
static int perf_constrain_idx;

/*
 * Performance-Energy (P-E) space threshold constants
 */
struct threshold_params {
	int nrg_gain;
	int cap_gain;
};

/*
 * System-specific P-E space threshold constants
 */
static struct threshold_params
threshold_gains[] = {
	{ 0, 5 }, /*   < 10% */
	{ 1, 5 }, /*   < 20% */
	{ 2, 5 }, /*   < 30% */
	{ 3, 5 }, /*   < 40% */
	{ 4, 5 }, /*   < 50% */
	{ 5, 4 }, /*   < 60% */
	{ 5, 3 }, /*   < 70% */
	{ 5, 2 }, /*   < 80% */
	{ 5, 1 }, /*   < 90% */
	{ 5, 0 }  /* <= 100% */
};
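
/*
 * Example: a 25% boost maps to index 2 (clamp(25, 0, 99) / 10, see
 * boost_write() below), i.e. nrg_gain = 2 and cap_gain = 5 are used for
 * both the B and C regions.
 */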

static int
__schedtune_accept_deltas(int nrg_delta, int cap_delta,
			  int perf_boost_idx, int perf_constrain_idx)
{
	int payoff = -INT_MAX;
	int gain_idx = -1;

	/* Performance Boost (B) region */
	if (nrg_delta >= 0 && cap_delta > 0)
		gain_idx = perf_boost_idx;
	/* Performance Constraint (C) region */
	else if (nrg_delta < 0 && cap_delta <= 0)
		gain_idx = perf_constrain_idx;

	/* Default: reject schedule candidate */
	if (gain_idx == -1)
		return payoff;

	/*
	 * Evaluate "Performance Boost" vs "Energy Increase"
	 *
	 * - Performance Boost (B) region
	 *
	 *   Condition: nrg_delta >= 0 && cap_delta > 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  <  cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since both nrg_gain and nrg_delta are positive, the
	 *   inequality does not change. Thus:
	 *
	 *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * - Performance Constraint (C) region
	 *
	 *   Condition: nrg_delta < 0 && cap_delta <= 0
	 *   Payoff criteria:
	 *      cap_gain / nrg_gain  >  cap_delta / nrg_delta =
	 *      cap_gain * nrg_delta < cap_delta * nrg_gain
	 *   Note that since nrg_gain > 0 while nrg_delta < 0, the
	 *   inequality changes. Thus:
	 *
	 *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
	 *
	 * This means that, given the same positive {cap,nrg}_gain values
	 * for both the B and C regions, we can use the same payoff formula
	 * where a positive value represents the accept condition.
	 */
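	/*
	 * Example: with threshold_gains[gain_idx] = { 5, 3 } (nrg_gain = 5,
	 * cap_gain = 3), cap_delta = 100 and nrg_delta = 20, the computation
	 * below yields payoff = 100 * 5 - 20 * 3 = 440, a positive value,
	 * i.e. the candidate is accepted.
	 */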
	payoff = cap_delta * threshold_gains[gain_idx].nrg_gain;
	payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;

	return payoff;
}

#ifdef CONFIG_CGROUP_SCHEDTUNE

/*
 * EAS scheduler tunables for task groups.
 */

/* SchedTune tunables for a group of tasks */
struct schedtune {
	/* SchedTune CGroup subsystem */
	struct cgroup_subsys_state css;

	/* Boost group allocated ID */
	int idx;

	/* Boost value for tasks on that SchedTune CGroup */
	int boost;

#ifdef CONFIG_SCHED_WALT
	/* When set, the kernel cannot override sched_boost_enabled below */
	bool sched_boost_no_override;

	/*
	 * Controls whether a cgroup is eligible for sched boost or not. This
	 * can temporarily be disabled by the kernel based on the no_override
	 * flag above.
	 */
	bool sched_boost_enabled;

	/*
	 * This tracks the default value of sched_boost_enabled and is used
	 * to restore the value following any temporary changes to that flag.
	 */
	bool sched_boost_enabled_backup;

	/*
	 * Controls whether tasks of this cgroup should be colocated with each
	 * other and tasks of other cgroups that have the same flag turned on.
	 */
	bool colocate;

	/* Controls whether further updates are allowed to the colocate flag */
	bool colocate_update_disabled;
#endif /* CONFIG_SCHED_WALT */

	/* Performance Boost (B) region threshold params */
	int perf_boost_idx;

	/* Performance Constraint (C) region threshold params */
	int perf_constrain_idx;

	/*
	 * Hint to bias scheduling of tasks on that SchedTune CGroup
	 * towards idle CPUs
	 */
	int prefer_idle;
};

static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
{
	return container_of(css, struct schedtune, css);
}

static inline struct schedtune *task_schedtune(struct task_struct *tsk)
{
	return css_st(task_css(tsk, schedtune_cgrp_id));
}

static inline struct schedtune *parent_st(struct schedtune *st)
{
	return css_st(st->css.parent);
}

/*
 * SchedTune root control group
 * The root control group is used to define the system-wide boost tuning,
 * which is applied to all tasks in the system.
 * Task-specific boost tuning can be specified by creating and configuring
 * a child control group under the root one.
 * By default, system-wide boosting is disabled, i.e. no boosting is applied
 * to tasks that are not in a child control group.
 */
static struct schedtune
root_schedtune = {
	.boost	= 0,
#ifdef CONFIG_SCHED_WALT
	.sched_boost_no_override = false,
	.sched_boost_enabled = true,
	.sched_boost_enabled_backup = true,
	.colocate = false,
	.colocate_update_disabled = false,
#endif
	.perf_boost_idx = 0,
	.perf_constrain_idx = 0,
	.prefer_idle = 0,
};

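/*
 * schedtune_accept_deltas() - filter an energy-aware scheduling candidate.
 * Trivially accept candidates which save energy while improving capacity
 * (Optimal region) and reject candidates which cost energy while reducing
 * capacity (Suboptimal region); otherwise defer to the payoff evaluation of
 * __schedtune_accept_deltas() using the given task's boost group thresholds.
 */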
int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	struct schedtune *ct;
	int perf_boost_idx;
	int perf_constrain_idx;

	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	/* Get task specific perf Boost/Constraints indexes */
	rcu_read_lock();
	ct = task_schedtune(task);
	perf_boost_idx = ct->perf_boost_idx;
	perf_constrain_idx = ct->perf_constrain_idx;
	rcu_read_unlock();

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

/*
 * Maximum number of boost groups to support
 * When per-task boosting is used we still allow only a limited number of
 * boost groups for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    make sense to boost with different values (e.g. background vs foreground
 *    tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 5

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
	&root_schedtune,
	NULL,
};

/*
 * SchedTune boost groups
 * Keep track of all the boost groups that affect a CPU, for example when a
 * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
 * likely with different boost values.
 * Since on each system we expect only a limited number of boost groups, here
 * we use a simple array to keep track of the metrics required to compute the
 * maximum per-CPU boosting value.
 */
struct boost_groups {
	bool idle;
	/* Maximum boost value for all RUNNABLE tasks on a CPU */
	int boost_max;
	struct {
		/* The boost for tasks on that boost group */
		int boost;
		/* Count of RUNNABLE tasks on that boost group */
		unsigned tasks;
	} group[BOOSTGROUPS_COUNT];
	/* CPU's boost group locking */
	raw_spinlock_t lock;
};

/* Boost groups affecting each CPU in the system */
DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);

#ifdef CONFIG_SCHED_WALT
static inline void init_sched_boost(struct schedtune *st)
{
	st->sched_boost_no_override = false;
	st->sched_boost_enabled = true;
	st->sched_boost_enabled_backup = st->sched_boost_enabled;
	st->colocate = false;
	st->colocate_update_disabled = false;
}

bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
{
	return task_schedtune(tsk1) == task_schedtune(tsk2);
}

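/*
 * Temporarily disable sched boost for every boost group that has not opted
 * out via sched_boost_no_override; restore_cgroup_boost_settings() undoes
 * this by reinstating each group's backed up value.
 */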
void update_cgroup_boost_settings(void)
{
	int i;

	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
		if (!allocated_group[i])
			break;

		if (allocated_group[i]->sched_boost_no_override)
			continue;

		allocated_group[i]->sched_boost_enabled = false;
	}
}

void restore_cgroup_boost_settings(void)
{
	int i;

	for (i = 0; i < BOOSTGROUPS_COUNT; i++) {
		if (!allocated_group[i])
			break;

		allocated_group[i]->sched_boost_enabled =
			allocated_group[i]->sched_boost_enabled_backup;
	}
}

bool task_sched_boost(struct task_struct *p)
{
	struct schedtune *st = task_schedtune(p);

	return st->sched_boost_enabled;
}

static u64
sched_boost_override_read(struct cgroup_subsys_state *css,
			  struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->sched_boost_no_override;
}

static int sched_boost_override_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 override)
{
	struct schedtune *st = css_st(css);

	st->sched_boost_no_override = !!override;

	return 0;
}

#endif /* CONFIG_SCHED_WALT */

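/*
 * Recompute the maximum boost value for @cpu by scanning all boost groups
 * that currently have RUNNABLE tasks on that CPU, and cache the result
 * (clamped to be non-negative) in the per-CPU boost_groups::boost_max.
 */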
static void
schedtune_cpu_update(int cpu)
{
	struct boost_groups *bg;
	int boost_max;
	int idx;

	bg = &per_cpu(cpu_boost_groups, cpu);

	/* The root boost group is always active */
	boost_max = bg->group[0].boost;
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
		/*
		 * A boost group affects a CPU only if it has
		 * RUNNABLE tasks on that CPU
		 */
		if (bg->group[idx].tasks == 0)
			continue;

		boost_max = max(boost_max, bg->group[idx].boost);
	}
	/*
	 * Ensure boost_max is non-negative when all cgroup boost values
	 * are negative. Avoids under-accounting of CPU capacity, which may
	 * cause task stacking and frequency spikes.
	 */
	boost_max = max(boost_max, 0);
	bg->boost_max = boost_max;
}

static int
schedtune_boostgroup_update(int idx, int boost)
{
	struct boost_groups *bg;
	int cur_boost_max;
	int old_boost;
	int cpu;

	/* Update per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);

		/*
		 * Keep track of current boost values to compute the per CPU
		 * maximum only when it has been affected by the new value of
		 * the updated boost group
		 */
		cur_boost_max = bg->boost_max;
		old_boost = bg->group[idx].boost;

		/* Update the boost value of this boost group */
		bg->group[idx].boost = boost;

		/* Check if this update increases the current max */
		if (boost > cur_boost_max && bg->group[idx].tasks) {
			bg->boost_max = boost;
			trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
			continue;
		}

		/* Check if this update has decreased the current max */
		if (cur_boost_max == old_boost && old_boost > boost) {
			schedtune_cpu_update(cpu);
			trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
			continue;
		}

		trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
	}

	return 0;
}

#define ENQUEUE_TASK  1
#define DEQUEUE_TASK -1

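/*
 * Adjust the RUNNABLE task count of boost group @idx on @cpu by @task_count
 * (ENQUEUE_TASK or DEQUEUE_TASK) and refresh the per-CPU maximum boost when
 * the group becomes active or inactive.
 */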
static inline void
schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	int tasks = bg->group[idx].tasks + task_count;

	/* Update the boosted tasks count, avoiding making it negative */
	bg->group[idx].tasks = max(0, tasks);

	trace_sched_tune_tasks_update(p, cpu, tasks, idx,
			bg->group[idx].boost, bg->boost_max);

	/* Boost group activation or deactivation on that RQ */
	if (tasks == 1 || tasks == 0)
		schedtune_cpu_update(cpu);
}

/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-CPU lock and requires
	 * interrupts to be disabled to avoid race conditions, for example on
	 * do_exit()::cgroup_exit() and during task migration.
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

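/*
 * Migrate the boost group accounting of every RUNNABLE task in @tset from
 * its source boost group to the destination group it is being attached to,
 * updating the per-CPU maximum boost where needed.
 */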
int schedtune_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	struct boost_groups *bg;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int src_bg; /* Source boost group index */
	int dst_bg; /* Destination boost group index */
	int tasks;

	if (unlikely(!schedtune_initialized))
		return 0;

	cgroup_taskset_for_each(task, css, tset) {

		/*
		 * Lock the RQ of the CPU the task is enqueued on, to avoid
		 * race conditions with the migration code while the task
		 * is being accounted.
		 */
		rq = lock_rq_of(task, &irq_flags);

		if (!task->on_rq) {
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * Boost group accounting is protected by a per-CPU lock and
		 * requires interrupts to be disabled to avoid race
		 * conditions on...
		 */
		cpu = cpu_of(rq);
		bg = &per_cpu(cpu_boost_groups, cpu);
		raw_spin_lock(&bg->lock);

		dst_bg = css_st(css)->idx;
		src_bg = task_schedtune(task)->idx;

		/*
		 * Current task is not changing boostgroup, which can
		 * happen when the new hierarchy is in use.
		 */
		if (unlikely(dst_bg == src_bg)) {
			raw_spin_unlock(&bg->lock);
			unlock_rq_of(rq, task, &irq_flags);
			continue;
		}

		/*
		 * This is the case of a RUNNABLE task which is switching its
		 * current boost group.
		 */

		/* Move task from src to dst boost group */
		tasks = bg->group[src_bg].tasks - 1;
		bg->group[src_bg].tasks = max(0, tasks);
		bg->group[dst_bg].tasks += 1;

		raw_spin_unlock(&bg->lock);
		unlock_rq_of(rq, task, &irq_flags);

		/* Update CPU boost group */
		if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
			schedtune_cpu_update(task_cpu(task));

	}

	return 0;
}

#ifdef CONFIG_SCHED_WALT
static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->sched_boost_enabled;
}

static int sched_boost_enabled_write(struct cgroup_subsys_state *css,
				     struct cftype *cft, u64 enable)
{
	struct schedtune *st = css_st(css);

	st->sched_boost_enabled = !!enable;
	st->sched_boost_enabled_backup = st->sched_boost_enabled;

	return 0;
}

static u64 sched_colocate_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->colocate;
}

static int sched_colocate_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 colocate)
{
	struct schedtune *st = css_st(css);

	if (st->colocate_update_disabled)
		return -EPERM;

	st->colocate = !!colocate;
	st->colocate_update_disabled = true;
	return 0;
}

#else /* CONFIG_SCHED_WALT */

static inline void init_sched_boost(struct schedtune *st) { }

#endif /* CONFIG_SCHED_WALT */

void schedtune_cancel_attach(struct cgroup_taskset *tset)
{
	/*
	 * This can happen only if the SchedTune controller is mounted with
	 * other hierarchies and one of them fails. Since SchedTune is usually
	 * mounted on its own hierarchy, for the time being we do not
	 * implement a proper rollback mechanism.
	 */
	WARN(1, "SchedTune cancel attach not implemented");
}

/*
 * NOTE: This function must be called while holding the lock on the CPU RQ
 */
void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned long irq_flags;
	struct schedtune *st;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	/*
	 * When a task is marked PF_EXITING by do_exit() it's going to be
	 * dequeued and enqueued multiple times in the exit path.
	 * Thus we avoid any further update, since we do not want to change
	 * CPU boosting while the task is exiting.
	 * The last dequeue is already enforced by the do_exit() code path
	 * via schedtune_exit_task().
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * Boost group accounting is protected by a per-CPU lock and requires
	 * interrupts to be disabled to avoid race conditions on...
	 */
	raw_spin_lock_irqsave(&bg->lock, irq_flags);
	rcu_read_lock();

	st = task_schedtune(p);
	idx = st->idx;

	schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}

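/*
 * Drop the exiting task's contribution to its boost group accounting. This
 * is called once from the do_exit() path, since the regular dequeue hook
 * above ignores PF_EXITING tasks.
 */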
void schedtune_exit_task(struct task_struct *tsk)
{
	struct schedtune *st;
	struct rq_flags irq_flags;
	unsigned int cpu;
	struct rq *rq;
	int idx;

	if (unlikely(!schedtune_initialized))
		return;

	rq = lock_rq_of(tsk, &irq_flags);
	rcu_read_lock();

	cpu = cpu_of(rq);
	st = task_schedtune(tsk);
	idx = st->idx;
	schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);

	rcu_read_unlock();
	unlock_rq_of(rq, tsk, &irq_flags);
}

int schedtune_cpu_boost(int cpu)
{
	struct boost_groups *bg;

	bg = &per_cpu(cpu_boost_groups, cpu);
	return bg->boost_max;
}

int schedtune_task_boost(struct task_struct *p)
{
	struct schedtune *st;
	int task_boost;

	/* Get task boost value */
	rcu_read_lock();
	st = task_schedtune(p);
	task_boost = st->boost;
	rcu_read_unlock();

	return task_boost;
}

int schedtune_prefer_idle(struct task_struct *p)
{
	struct schedtune *st;
	int prefer_idle;

	/* Get prefer_idle value */
	rcu_read_lock();
	st = task_schedtune(p);
	prefer_idle = st->prefer_idle;
	rcu_read_unlock();

	return prefer_idle;
}

static u64
prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->prefer_idle;
}

static int
prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
		  u64 prefer_idle)
{
	struct schedtune *st = css_st(css);

	st->prefer_idle = prefer_idle;

	return 0;
}

static s64
boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct schedtune *st = css_st(css);

	return st->boost;
}

#ifdef CONFIG_SCHED_WALT
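/*
 * Propagate the destination cgroup's colocate setting to every task that is
 * being attached to it.
 */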
static void schedtune_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;
	struct schedtune *st;
	bool colocate;

	cgroup_taskset_first(tset, &css);
	st = css_st(css);

	colocate = st->colocate;

	cgroup_taskset_for_each(task, css, tset)
		sync_cgroup_colocation(task, colocate);
}
#endif

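/*
 * Accept a boost value in the range [-100, 100]. Besides storing the value,
 * map it to the matching threshold_gains index for the B and C regions;
 * writes to the root group also update the global sysctl value and indexes.
 */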
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
	    s64 boost)
{
	struct schedtune *st = css_st(css);
	unsigned threshold_idx;
	int boost_pct;

	if (boost < -100 || boost > 100)
		return -EINVAL;
	boost_pct = boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	st->perf_boost_idx = threshold_idx;
	st->perf_constrain_idx = threshold_idx;

	st->boost = boost;
	if (css == &root_schedtune.css) {
		sysctl_sched_cfs_boost = boost;
		perf_boost_idx = threshold_idx;
		perf_constrain_idx = threshold_idx;
	}

	/* Update CPU boost */
	schedtune_boostgroup_update(st->idx, st->boost);

	trace_sched_tune_config(st->boost);

	return 0;
}

static struct cftype files[] = {
#ifdef CONFIG_SCHED_WALT
	{
		.name = "sched_boost_no_override",
		.read_u64 = sched_boost_override_read,
		.write_u64 = sched_boost_override_write,
	},
	{
		.name = "sched_boost_enabled",
		.read_u64 = sched_boost_enabled_read,
		.write_u64 = sched_boost_enabled_write,
	},
	{
		.name = "colocate",
		.read_u64 = sched_colocate_read,
		.write_u64 = sched_colocate_write,
	},
#endif
	{
		.name = "boost",
		.read_s64 = boost_read,
		.write_s64 = boost_write,
	},
	{
		.name = "prefer_idle",
		.read_u64 = prefer_idle_read,
		.write_u64 = prefer_idle_write,
	},
	{ }	/* terminate */
};
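
/*
 * Userspace example (illustrative only: the actual paths depend on where the
 * schedtune controller is mounted, e.g. /dev/stune on Android, and on the
 * child groups created by the system integrator):
 *
 *   echo 10 > <stune-mount>/<group>/schedtune.boost
 *   echo 1  > <stune-mount>/<group>/schedtune.prefer_idle
 */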
static int
schedtune_boostgroup_init(struct schedtune *st)
{
	struct boost_groups *bg;
	int cpu;

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = st;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		bg->group[st->idx].boost = 0;
		bg->group[st->idx].tasks = 0;
	}

	return 0;
}

static struct cgroup_subsys_state *
schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct schedtune *st;
	int idx;

	if (!parent_css)
		return &root_schedtune.css;

	/* Allow only single level hierarchies */
	if (parent_css != &root_schedtune.css) {
		pr_err("Nested SchedTune boosting groups not allowed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Allow only a limited number of boosting groups */
	for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
		if (!allocated_group[idx])
			break;
	if (idx == BOOSTGROUPS_COUNT) {
		pr_err("Trying to create more than %d SchedTune boosting groups\n",
		       BOOSTGROUPS_COUNT);
		return ERR_PTR(-ENOSPC);
	}

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto out;

	/* Initialize per CPU boost group support */
	st->idx = idx;
	init_sched_boost(st);
	if (schedtune_boostgroup_init(st))
		goto release;

	return &st->css;

release:
	kfree(st);
out:
	return ERR_PTR(-ENOMEM);
}

static void
schedtune_boostgroup_release(struct schedtune *st)
{
	/* Reset this boost group */
	schedtune_boostgroup_update(st->idx, 0);

	/* Keep track of allocated boost groups */
	allocated_group[st->idx] = NULL;
}

static void
schedtune_css_free(struct cgroup_subsys_state *css)
{
	struct schedtune *st = css_st(css);

	schedtune_boostgroup_release(st);
	kfree(st);
}

struct cgroup_subsys schedtune_cgrp_subsys = {
	.css_alloc	= schedtune_css_alloc,
	.css_free	= schedtune_css_free,
	.allow_attach	= subsys_cgroup_allow_attach,
	.attach		= schedtune_attach,
	.can_attach	= schedtune_can_attach,
	.cancel_attach	= schedtune_cancel_attach,
	.legacy_cftypes	= files,
	.early_init	= 1,
};

static inline void
schedtune_init_cgroups(void)
{
	struct boost_groups *bg;
	int cpu;

	/* Initialize the per CPU boost groups */
	for_each_possible_cpu(cpu) {
		bg = &per_cpu(cpu_boost_groups, cpu);
		memset(bg, 0, sizeof(struct boost_groups));
		raw_spin_lock_init(&bg->lock);
	}

	pr_info("schedtune: configured to support %d boost groups\n",
		BOOSTGROUPS_COUNT);

	schedtune_initialized = true;
}

#else /* CONFIG_CGROUP_SCHEDTUNE */

int
schedtune_accept_deltas(int nrg_delta, int cap_delta,
			struct task_struct *task)
{
	/* Optimal (O) region */
	if (nrg_delta < 0 && cap_delta > 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
		return INT_MAX;
	}

	/* Suboptimal (S) region */
	if (nrg_delta > 0 && cap_delta < 0) {
		trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
		return -INT_MAX;
	}

	return __schedtune_accept_deltas(nrg_delta, cap_delta,
			perf_boost_idx, perf_constrain_idx);
}

#endif /* CONFIG_CGROUP_SCHEDTUNE */

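/*
 * Handler for the sysctl_sched_cfs_boost knob (typically exposed as
 * /proc/sys/kernel/sched_cfs_boost): on a successful write, refresh the
 * global Performance Boost and Performance Constraint indexes from the new
 * boost percentage.
 */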
int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned threshold_idx;
	int boost_pct;

	if (ret || !write)
		return ret;

	if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
		return -EINVAL;
	boost_pct = sysctl_sched_cfs_boost;

	/*
	 * Update threshold params for Performance Boost (B)
	 * and Performance Constraint (C) regions.
	 * The current implementation uses the same cuts for both
	 * B and C regions.
	 */
	threshold_idx = clamp(boost_pct, 0, 99) / 10;
	perf_boost_idx = threshold_idx;
	perf_constrain_idx = threshold_idx;

	return 0;
}

#ifdef CONFIG_SCHED_DEBUG
static void
schedtune_test_nrg(unsigned long delta_pwr)
{
	unsigned long test_delta_pwr;
	unsigned long test_norm_pwr;
	int idx;

	/*
	 * Check normalization constants using some constant system
	 * energy values
	 */
	pr_info("schedtune: verify normalization constants...\n");
	for (idx = 0; idx < 6; ++idx) {
		test_delta_pwr = delta_pwr >> idx;

		/* Normalize on max energy for target platform */
		test_norm_pwr = reciprocal_divide(
			test_delta_pwr << SCHED_CAPACITY_SHIFT,
			schedtune_target_nrg.rdiv);

		pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
			idx, test_delta_pwr, test_norm_pwr);
	}
}
#else
#define schedtune_test_nrg(delta_pwr)
#endif

/*
 * Compute the min/max power consumption of a cluster and all its CPUs
 */
static void
schedtune_add_cluster_nrg(
		struct sched_domain *sd,
		struct sched_group *sg,
		struct target_nrg *ste)
{
	struct sched_domain *sd2;
	struct sched_group *sg2;

	struct cpumask *cluster_cpus;
	char str[32];

	unsigned long min_pwr;
	unsigned long max_pwr;
	int cpu;

	/* Get Cluster energy using EM data for the first CPU */
	cluster_cpus = sched_group_cpus(sg);
	snprintf(str, 32, "CLUSTER[%*pbl]",
		 cpumask_pr_args(cluster_cpus));

	min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
	max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		str, min_pwr, max_pwr);

	/*
	 * Keep track of this cluster's energy in the computation of the
	 * overall system energy
	 */
	ste->min_power += min_pwr;
	ste->max_power += max_pwr;

	/* Get CPU energy using EM data for each CPU in the group */
	for_each_cpu(cpu, cluster_cpus) {
		/* Get a SD view for the specific CPU */
		for_each_domain(cpu, sd2) {
			/* Get the CPU group */
			sg2 = sd2->groups;
			min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
			max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;

			ste->min_power += min_pwr;
			ste->max_power += max_pwr;

			snprintf(str, 32, "CPU[%d]", cpu);
			pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
				str, min_pwr, max_pwr);

			/*
			 * Assume we have EM data only at the CPU and
			 * the upper CLUSTER level
			 */
			break;
		}
	}
}

/*
 * Initialize the constants required to compute normalized energy.
 * The values of these constants depend on the EM data for the specific
 * target system and topology.
 * Thus, this function is expected to be called by the code
 * that binds the EM to the topology information.
 */
static int
schedtune_init(void)
{
	struct target_nrg *ste = &schedtune_target_nrg;
	unsigned long delta_pwr = 0;
	struct sched_domain *sd;
	struct sched_group *sg;

	pr_info("schedtune: init normalization constants...\n");
	ste->max_power = 0;
	ste->min_power = 0;

	rcu_read_lock();

	/*
	 * When EAS is in use, we always have a pointer to the highest SD
	 * which provides EM data.
	 */
	sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
	if (!sd) {
		pr_info("schedtune: no energy model data\n");
		goto nodata;
	}

	sg = sd->groups;
	do {
		schedtune_add_cluster_nrg(sd, sg, ste);
	} while (sg = sg->next, sg != sd->groups);

	rcu_read_unlock();

	pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
		"SYSTEM", ste->min_power, ste->max_power);

	/* Compute normalization constants */
	delta_pwr = ste->max_power - ste->min_power;
	ste->rdiv = reciprocal_value(delta_pwr);
	pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
		ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);

	schedtune_test_nrg(delta_pwr);

#ifdef CONFIG_CGROUP_SCHEDTUNE
	schedtune_init_cgroups();
#else
	pr_info("schedtune: configured to support global boosting only\n");
#endif /* CONFIG_CGROUP_SCHEDTUNE */

	return 0;

nodata:
	rcu_read_unlock();
	return -EINVAL;
}
postcore_initcall(schedtune_init);