blob: 8d7cf3f31450728ab60a3f349f2e9f88b00cfb95 [file] [log] [blame]
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
8
9#include <linux/kernel.h>
10#include <linux/cpuidle.h>
Mark Grossd82b3512008-02-04 22:30:08 -080011#include <linux/pm_qos_params.h>
Len Brown4f86d3a2007-10-03 18:58:00 -040012#include <linux/time.h>
13#include <linux/ktime.h>
14#include <linux/hrtimer.h>
15#include <linux/tick.h>
16
/* Slack (in microseconds) allowed when deciding whether we woke up "early". */
#define BREAK_FUZZ	4	/* 4 us */

/* Per-CPU bookkeeping for the menu governor. */
struct menu_device {
	int		last_state_idx;	/* state index chosen by the last menu_select() */

	unsigned int	expected_us;	/* sleep length predicted by the nohz tick layer */
	unsigned int	predicted_us;	/* our prediction of time until the next break event */
	unsigned int	last_measured_us; /* idle time recorded at the last early (interrupt) wakeup */
	unsigned int	elapsed_us;	/* cumulative idle time since the last interrupt wakeup */
};

static DEFINE_PER_CPU(struct menu_device, menu_devices);
29
30/**
31 * menu_select - selects the next idle state to enter
32 * @dev: the CPU
33 */
34static int menu_select(struct cpuidle_device *dev)
35{
36 struct menu_device *data = &__get_cpu_var(menu_devices);
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070037 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
Len Brown4f86d3a2007-10-03 18:58:00 -040038 int i;
39
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070040 /* Special case when user has set very strict latency requirement */
41 if (unlikely(latency_req == 0)) {
42 data->last_state_idx = 0;
43 return 0;
44 }
45
Len Brown4f86d3a2007-10-03 18:58:00 -040046 /* determine the expected residency time */
47 data->expected_us =
48 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
49
50 /* find the deepest idle state that satisfies our constraints */
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070051 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
Len Brown4f86d3a2007-10-03 18:58:00 -040052 struct cpuidle_state *s = &dev->states[i];
53
54 if (s->target_residency > data->expected_us)
55 break;
56 if (s->target_residency > data->predicted_us)
57 break;
venkatesh.pallipadi@intel.coma2bd92022008-07-30 19:21:42 -070058 if (s->exit_latency > latency_req)
Len Brown4f86d3a2007-10-03 18:58:00 -040059 break;
60 }
61
62 data->last_state_idx = i - 1;
63 return i - 1;
64}
65
/**
 * menu_reflect - attempts to guess what happened after entry
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	/* Time actually spent in the last idle state, in microseconds. */
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = USEC_PER_SEC / HZ;

	/*
	 * measured_us and elapsed_us are the cumulative idle time, since the
	 * last time we were woken out of idle by an interrupt.
	 * The comparison detects unsigned overflow of the addition; on
	 * overflow we saturate at UINT_MAX (-1 converted to unsigned).
	 */
	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
		measured_us = data->elapsed_us + last_idle_us;
	else
		measured_us = -1;

	/* Predict time until next break event */
	data->predicted_us = max(measured_us, data->last_measured_us);

	/*
	 * Woke up noticeably earlier than the expected residency (minus the
	 * state's exit latency): treat it as an interrupt wakeup, commit the
	 * measurement and restart accumulation.  Otherwise keep accumulating.
	 * NOTE(review): expected_us is unsigned while exit_latency is an int;
	 * if exit_latency > expected_us the subtraction wraps to a huge value
	 * and this branch is always taken -- confirm that is intended.
	 */
	if (last_idle_us + BREAK_FUZZ <
		data->expected_us - target->exit_latency) {
		data->last_measured_us = measured_us;
		data->elapsed_us = 0;
	} else {
		data->elapsed_us = measured_us;
	}
}
110
111/**
112 * menu_enable_device - scans a CPU's states and does setup
113 * @dev: the CPU
114 */
115static int menu_enable_device(struct cpuidle_device *dev)
116{
117 struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
118
119 memset(data, 0, sizeof(struct menu_device));
120
121 return 0;
122}
123
/*
 * Governor registration record.  The rating (20) is presumably how the
 * cpuidle core ranks this governor against others -- verify against the
 * core's governor-selection logic.
 */
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};
132
/**
 * init_menu - initializes the governor and registers it with the cpuidle core
 *
 * Returns whatever cpuidle_register_governor() returns.
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}
140
/**
 * exit_menu - exits the governor, unregistering it from the cpuidle core
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}
148
/* Module metadata and entry/exit hooks. */
MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);