blob: a20bf12f3ce31a670e0a8960342fefa529fc5a22 [file] [log] [blame]
Daniel Lezcano88763a52018-06-26 12:53:29 +02001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2018 Linaro Limited
4 *
5 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
6 *
7 * The idle injection framework provides a way to force CPUs to enter idle
8 * states for a specified fraction of time over a specified period.
9 *
10 * It relies on the smpboot kthreads feature providing common code for CPU
11 * hotplug and thread [un]parking.
12 *
13 * All of the kthreads used for idle injection are created at init time.
14 *
 * Next, the users of the idle injection framework provide a cpumask via
 * the framework's register function. The kthreads will be synchronized with
 * respect to this cpumask.
18 *
19 * The idle + run duration is specified via separate helpers and that allows
20 * idle injection to be started.
21 *
Yangtao Li07350692020-06-21 16:04:12 +080022 * The idle injection kthreads will call play_idle_precise() with the idle
23 * duration and max allowed latency specified as per the above.
Daniel Lezcano88763a52018-06-26 12:53:29 +020024 *
25 * After all of them have been woken up, a timer is set to start the next idle
26 * injection cycle.
27 *
28 * The timer interrupt handler will wake up the idle injection kthreads for
29 * all of the CPUs in the cpumask provided by the user.
30 *
 * Idle injection is stopped synchronously, and it is guaranteed that no
 * leftover idle injection kthread activity remains after it completes.
33 *
34 * It is up to the user of this framework to provide a lock for higher-level
35 * synchronization to prevent race conditions like starting idle injection
36 * while unregistering from the framework.
37 */
38#define pr_fmt(fmt) "ii_dev: " fmt
39
40#include <linux/cpu.h>
41#include <linux/hrtimer.h>
42#include <linux/kthread.h>
43#include <linux/sched.h>
44#include <linux/slab.h>
45#include <linux/smpboot.h>
Pujin Shi00610932020-09-22 12:46:52 +080046#include <linux/idle_inject.h>
Daniel Lezcano88763a52018-06-26 12:53:29 +020047
48#include <uapi/linux/sched/types.h>
49
/**
 * struct idle_inject_thread - task on/off switch structure
 * @tsk: task injecting the idle cycles
 * @should_run: whether or not to run the task (for the smpboot kthread API);
 *              set by idle_inject_wakeup() and cleared either by the
 *              injection task itself (idle_inject_fn) or by idle_inject_stop()
 */
struct idle_inject_thread {
	struct task_struct *tsk;
	int should_run;
};
59
/**
 * struct idle_inject_device - idle injection data
 * @timer: idle injection period timer
 * @idle_duration_us: duration of CPU idle time to inject
 * @run_duration_us: duration of CPU run time to allow
 * @latency_us: max allowed latency (defaults to UINT_MAX at registration)
 * @cpumask: mask of CPUs affected by idle injection; flexible array member,
 *           cpumask_size() bytes are allocated right after the structure by
 *           idle_inject_register()
 */
struct idle_inject_device {
	struct hrtimer timer;
	unsigned int idle_duration_us;
	unsigned int run_duration_us;
	unsigned int latency_us;
	unsigned long cpumask[];
};
75
/* Per-CPU on/off state and task pointer for each idle injection kthread. */
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
/* Per-CPU pointer to the device a CPU is registered with (NULL if none). */
static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
78
79/**
80 * idle_inject_wakeup - Wake up idle injection threads
81 * @ii_dev: target idle injection device
82 *
83 * Every idle injection task associated with the given idle injection device
84 * and running on an online CPU will be woken up.
85 */
86static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
87{
88 struct idle_inject_thread *iit;
89 unsigned int cpu;
90
91 for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
92 iit = per_cpu_ptr(&idle_inject_thread, cpu);
93 iit->should_run = 1;
94 wake_up_process(iit->tsk);
95 }
96}
97
98/**
99 * idle_inject_timer_fn - idle injection timer function
100 * @timer: idle injection hrtimer
101 *
102 * This function is called when the idle injection timer expires. It wakes up
103 * idle injection tasks associated with the timer and they, in turn, invoke
Yangtao Li07350692020-06-21 16:04:12 +0800104 * play_idle_precise() to inject a specified amount of CPU idle time.
Daniel Lezcano88763a52018-06-26 12:53:29 +0200105 *
106 * Return: HRTIMER_RESTART.
107 */
108static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
109{
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200110 unsigned int duration_us;
Daniel Lezcano88763a52018-06-26 12:53:29 +0200111 struct idle_inject_device *ii_dev =
112 container_of(timer, struct idle_inject_device, timer);
113
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200114 duration_us = READ_ONCE(ii_dev->run_duration_us);
115 duration_us += READ_ONCE(ii_dev->idle_duration_us);
Daniel Lezcano88763a52018-06-26 12:53:29 +0200116
117 idle_inject_wakeup(ii_dev);
118
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200119 hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
Daniel Lezcano88763a52018-06-26 12:53:29 +0200120
121 return HRTIMER_RESTART;
122}
123
124/**
125 * idle_inject_fn - idle injection work function
126 * @cpu: the CPU owning the task
127 *
Yangtao Li07350692020-06-21 16:04:12 +0800128 * This function calls play_idle_precise() to inject a specified amount of CPU
129 * idle time.
Daniel Lezcano88763a52018-06-26 12:53:29 +0200130 */
131static void idle_inject_fn(unsigned int cpu)
132{
133 struct idle_inject_device *ii_dev;
134 struct idle_inject_thread *iit;
135
136 ii_dev = per_cpu(idle_inject_device, cpu);
137 iit = per_cpu_ptr(&idle_inject_thread, cpu);
138
139 /*
140 * Let the smpboot main loop know that the task should not run again.
141 */
142 iit->should_run = 0;
143
Daniel Lezcano333cff6c2020-04-29 12:36:39 +0200144 play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,
145 READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);
Daniel Lezcano88763a52018-06-26 12:53:29 +0200146}
147
148/**
149 * idle_inject_set_duration - idle and run duration update helper
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200150 * @run_duration_us: CPU run time to allow in microseconds
151 * @idle_duration_us: CPU idle time to inject in microseconds
Daniel Lezcano88763a52018-06-26 12:53:29 +0200152 */
153void idle_inject_set_duration(struct idle_inject_device *ii_dev,
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200154 unsigned int run_duration_us,
155 unsigned int idle_duration_us)
Daniel Lezcano88763a52018-06-26 12:53:29 +0200156{
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200157 if (run_duration_us && idle_duration_us) {
158 WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
159 WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
Daniel Lezcano88763a52018-06-26 12:53:29 +0200160 }
161}
162
163/**
164 * idle_inject_get_duration - idle and run duration retrieval helper
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200165 * @run_duration_us: memory location to store the current CPU run time
166 * @idle_duration_us: memory location to store the current CPU idle time
Daniel Lezcano88763a52018-06-26 12:53:29 +0200167 */
168void idle_inject_get_duration(struct idle_inject_device *ii_dev,
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200169 unsigned int *run_duration_us,
170 unsigned int *idle_duration_us)
Daniel Lezcano88763a52018-06-26 12:53:29 +0200171{
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200172 *run_duration_us = READ_ONCE(ii_dev->run_duration_us);
173 *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
Daniel Lezcano88763a52018-06-26 12:53:29 +0200174}
175
/**
 * idle_inject_set_latency - set the maximum latency allowed
 * @ii_dev: idle injection control device structure
 * @latency_us: set the latency requirement for the idle state
 */
void idle_inject_set_latency(struct idle_inject_device *ii_dev,
			     unsigned int latency_us)
{
	WRITE_ONCE(ii_dev->latency_us, latency_us);
}
185
186/**
Daniel Lezcano88763a52018-06-26 12:53:29 +0200187 * idle_inject_start - start idle injections
188 * @ii_dev: idle injection control device structure
189 *
190 * The function starts idle injection by first waking up all of the idle
191 * injection kthreads associated with @ii_dev to let them inject CPU idle time
192 * sets up a timer to start the next idle injection period.
193 *
194 * Return: -EINVAL if the CPU idle or CPU run time is not set or 0 on success.
195 */
196int idle_inject_start(struct idle_inject_device *ii_dev)
197{
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200198 unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
199 unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
Daniel Lezcano88763a52018-06-26 12:53:29 +0200200
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200201 if (!idle_duration_us || !run_duration_us)
Daniel Lezcano88763a52018-06-26 12:53:29 +0200202 return -EINVAL;
203
204 pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
205 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
206
207 idle_inject_wakeup(ii_dev);
208
209 hrtimer_start(&ii_dev->timer,
Daniel Lezcanocd4c0762019-08-02 19:34:24 +0200210 ns_to_ktime((idle_duration_us + run_duration_us) *
211 NSEC_PER_USEC),
Daniel Lezcano88763a52018-06-26 12:53:29 +0200212 HRTIMER_MODE_REL);
213
214 return 0;
215}
216
/**
 * idle_inject_stop - stops idle injections
 * @ii_dev: idle injection control device structure
 *
 * The function stops idle injection and waits for the threads to finish work.
 * If CPU idle time is being injected when this function runs, then it will
 * wait until the end of the cycle.
 *
 * When it returns, there is no more idle injection kthread activity. The
 * kthreads are scheduled out and the periodic timer is off.
 */
void idle_inject_stop(struct idle_inject_device *ii_dev)
{
	struct idle_inject_thread *iit;
	unsigned int cpu;

	pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	/* Cancel the period timer first so no new cycles get started. */
	hrtimer_cancel(&ii_dev->timer);

	/*
	 * Stopping idle injection requires all of the idle injection kthreads
	 * associated with the given cpumask to be parked and stay that way, so
	 * prevent CPUs from going online at this point. Any CPUs going online
	 * after the loop below will be covered by clearing the should_run flag
	 * that will cause the smpboot main loop to schedule them out.
	 */
	cpu_hotplug_disable();

	/*
	 * Iterate over all (online + offline) CPUs here in case one of them
	 * goes offline with the should_run flag set so as to prevent its idle
	 * injection kthread from running when the CPU goes online again after
	 * the ii_dev has been freed.
	 */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
		iit = per_cpu_ptr(&idle_inject_thread, cpu);
		iit->should_run = 0;

		/* Block until the kthread is scheduled out (if it was running). */
		wait_task_inactive(iit->tsk, 0);
	}

	cpu_hotplug_enable();
}
262
/**
 * idle_inject_setup - prepare the current task for idle injection
 * @cpu: not used
 *
 * Called once, this function is in charge of setting the current task's
 * scheduler parameters to make it an RT task.
 */
static void idle_inject_setup(unsigned int cpu)
{
	/* Switch the kthread to the SCHED_FIFO real-time policy. */
	sched_set_fifo(current);
}
274
275/**
276 * idle_inject_should_run - function helper for the smpboot API
277 * @cpu: CPU the kthread is running on
278 *
279 * Return: whether or not the thread can run.
280 */
281static int idle_inject_should_run(unsigned int cpu)
282{
283 struct idle_inject_thread *iit =
284 per_cpu_ptr(&idle_inject_thread, cpu);
285
286 return iit->should_run;
287}
288
289/**
290 * idle_inject_register - initialize idle injection on a set of CPUs
291 * @cpumask: CPUs to be affected by idle injection
292 *
293 * This function creates an idle injection control device structure for the
294 * given set of CPUs and initializes the timer associated with it. It does not
295 * start any injection cycles.
296 *
297 * Return: NULL if memory allocation fails, idle injection control device
298 * pointer on success.
299 */
300struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
301{
302 struct idle_inject_device *ii_dev;
303 int cpu, cpu_rb;
304
305 ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
306 if (!ii_dev)
307 return NULL;
308
309 cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
310 hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
311 ii_dev->timer.function = idle_inject_timer_fn;
Daniel Lezcano333cff6c2020-04-29 12:36:39 +0200312 ii_dev->latency_us = UINT_MAX;
Daniel Lezcano88763a52018-06-26 12:53:29 +0200313
314 for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
315
316 if (per_cpu(idle_inject_device, cpu)) {
317 pr_err("cpu%d is already registered\n", cpu);
318 goto out_rollback;
319 }
320
321 per_cpu(idle_inject_device, cpu) = ii_dev;
322 }
323
324 return ii_dev;
325
326out_rollback:
327 for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
328 if (cpu == cpu_rb)
329 break;
330 per_cpu(idle_inject_device, cpu_rb) = NULL;
331 }
332
333 kfree(ii_dev);
334
335 return NULL;
336}
337
338/**
339 * idle_inject_unregister - unregister idle injection control device
340 * @ii_dev: idle injection control device to unregister
341 *
342 * The function stops idle injection for the given control device,
343 * unregisters its kthreads and frees memory allocated when that device was
344 * created.
345 */
346void idle_inject_unregister(struct idle_inject_device *ii_dev)
347{
348 unsigned int cpu;
349
350 idle_inject_stop(ii_dev);
351
352 for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
353 per_cpu(idle_inject_device, cpu) = NULL;
354
355 kfree(ii_dev);
356}
357
/*
 * smpboot glue: describes one "idle_inject/%u" kthread per CPU, storing each
 * task pointer in the per-CPU idle_inject_thread.tsk slot so that the wakeup,
 * work and should-run callbacks above can find it.
 */
static struct smp_hotplug_thread idle_inject_threads = {
	.store = &idle_inject_thread.tsk,
	.setup = idle_inject_setup,
	.thread_fn = idle_inject_fn,
	.thread_comm = "idle_inject/%u",
	.thread_should_run = idle_inject_should_run,
};
365
/**
 * idle_inject_init - initialize the idle injection framework
 *
 * Creates the per-CPU idle injection kthreads via the smpboot API; they do
 * not run until woken up by idle_inject_wakeup(). Registered as an early
 * initcall.
 */
static int __init idle_inject_init(void)
{
	return smpboot_register_percpu_thread(&idle_inject_threads);
}
early_initcall(idle_inject_init);