/* drivers/cpufreq/cpufreq_times.c
 *
 * Copyright (C) 2018 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <trace/hooks/cpufreq.h>

static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */

/**
 * struct cpu_freqs - per-cpu frequency information
 * @offset: start of these freqs' stats in task time_in_state array
 * @max_state: number of entries in freq_table
 * @last_index: index in freq_table of last frequency switched to
 * @freq_table: list of available frequencies
 */
struct cpu_freqs {
        unsigned int offset;
        unsigned int max_state;
        unsigned int last_index;
        unsigned int freq_table[0];
};

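/*
 * Each task's time_in_state array is a concatenation of one slice per
 * policy: a policy's slice starts at cpu_freqs->offset and holds
 * cpu_freqs->max_state entries, one u64 of accumulated time per frequency.
 * next_offset is the total length claimed so far, and every CPU covered by
 * a policy points at the same cpu_freqs instance in all_freqs[].
 */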
static struct cpu_freqs *all_freqs[NR_CPUS];

static unsigned int next_offset;

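/*
 * Start a task with no time_in_state accounting; the array itself is
 * allocated separately by cpufreq_task_times_alloc().
 */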
void cpufreq_task_times_init(struct task_struct *p)
{
        unsigned long flags;

        spin_lock_irqsave(&task_time_in_state_lock, flags);
        p->time_in_state = NULL;
        spin_unlock_irqrestore(&task_time_in_state_lock, flags);
        p->max_state = 0;
}

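/*
 * Allocate one flat array covering every frequency of every policy known so
 * far (READ_ONCE(next_offset) entries). On allocation failure the task simply
 * has no time_in_state array and accounting for it is skipped.
 */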
void cpufreq_task_times_alloc(struct task_struct *p)
{
        void *temp;
        unsigned long flags;
        unsigned int max_state = READ_ONCE(next_offset);

        /* We use one array to avoid multiple allocs per task */
        temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
        if (!temp)
                return;

        spin_lock_irqsave(&task_time_in_state_lock, flags);
        p->time_in_state = temp;
        spin_unlock_irqrestore(&task_time_in_state_lock, flags);
        p->max_state = max_state;
}

/* Caller must hold task_time_in_state_lock */
static int cpufreq_task_times_realloc_locked(struct task_struct *p)
{
        void *temp;
        unsigned int max_state = READ_ONCE(next_offset);

        temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
        if (!temp)
                return -ENOMEM;
        p->time_in_state = temp;
        memset(p->time_in_state + p->max_state, 0,
               (max_state - p->max_state) * sizeof(u64));
        p->max_state = max_state;
        return 0;
}

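/*
 * Detach and free the task's time_in_state array. The pointer is cleared
 * under the lock so a concurrent cpufreq_acct_update_power() or
 * proc_time_in_state_show() cannot touch freed memory.
 */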
void cpufreq_task_times_exit(struct task_struct *p)
{
        unsigned long flags;
        void *temp;

        if (!p->time_in_state)
                return;

        spin_lock_irqsave(&task_time_in_state_lock, flags);
        temp = p->time_in_state;
        p->time_in_state = NULL;
        spin_unlock_irqrestore(&task_time_in_state_lock, flags);
        kfree(temp);
}

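/*
 * seq_file show handler for a task's time_in_state proc entry: walk all
 * possible CPUs and, for the first CPU of each policy, print a "cpu<N>"
 * header followed by one "<freq> <time>" line per frequency, with time
 * converted from nanoseconds to clock ticks.
 */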
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
        struct pid *pid, struct task_struct *p)
{
        unsigned int cpu, i;
        u64 cputime;
        unsigned long flags;
        struct cpu_freqs *freqs;
        struct cpu_freqs *last_freqs = NULL;

        spin_lock_irqsave(&task_time_in_state_lock, flags);
        for_each_possible_cpu(cpu) {
                freqs = all_freqs[cpu];
                if (!freqs || freqs == last_freqs)
                        continue;
                last_freqs = freqs;

                seq_printf(m, "cpu%u\n", cpu);
                for (i = 0; i < freqs->max_state; i++) {
                        cputime = 0;
                        if (freqs->offset + i < p->max_state &&
                            p->time_in_state)
                                cputime = p->time_in_state[freqs->offset + i];
                        seq_printf(m, "%u %lu\n", freqs->freq_table[i],
                                   (unsigned long)nsec_to_clock_t(cputime));
                }
        }
        spin_unlock_irqrestore(&task_time_in_state_lock, flags);
        return 0;
}

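/*
 * Charge @cputime (in nanoseconds) to the slot for the frequency the task's
 * CPU last switched to. If new policies have registered since the task's
 * array was sized, the array is grown under the lock first; idle and
 * exiting tasks are not charged.
 */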
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
        unsigned long flags;
        unsigned int state;
        struct cpu_freqs *freqs = all_freqs[task_cpu(p)];

        if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
                return;

        state = freqs->offset + READ_ONCE(freqs->last_index);

        spin_lock_irqsave(&task_time_in_state_lock, flags);
        if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
            p->time_in_state)
                p->time_in_state[state] += cputime;
        spin_unlock_irqrestore(&task_time_in_state_lock, flags);

        trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
}

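/* Linear search of freq_table; returns the index of @freq, or -1 if absent. */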
static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
{
        int index;

        for (index = 0; index < freqs->max_state; ++index) {
                if (freqs->freq_table[index] == freq)
                        return index;
        }
        return -1;
}

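/*
 * Register a policy with the time-in-state tracker: count the valid entries
 * in its frequency table, record them in a new cpu_freqs shared by all
 * related CPUs, claim the next slice of per-task array space, and remember
 * the current frequency as last_index.
 */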
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
{
        int cpu, index = 0;
        unsigned int count = 0;
        struct cpufreq_frequency_table *pos, *table;
        struct cpu_freqs *freqs;
        void *tmp;

        if (all_freqs[policy->cpu])
                return;

        table = policy->freq_table;
        if (!table)
                return;

        cpufreq_for_each_valid_entry(pos, table)
                count++;

        tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
                      GFP_KERNEL);
        if (!tmp)
                return;

        freqs = tmp;
        freqs->max_state = count;

        cpufreq_for_each_valid_entry(pos, table)
                freqs->freq_table[index++] = pos->frequency;

        index = cpufreq_times_get_index(freqs, policy->cur);
        if (index >= 0)
                WRITE_ONCE(freqs->last_index, index);

        freqs->offset = next_offset;
        WRITE_ONCE(next_offset, freqs->offset + count);
        for_each_cpu(cpu, policy->related_cpus)
                all_freqs[cpu] = freqs;
}

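/*
 * Note a frequency change on @policy so that subsequent
 * cpufreq_acct_update_power() calls charge time to the new frequency.
 */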
void cpufreq_times_record_transition(struct cpufreq_policy *policy,
        unsigned int new_freq)
{
        int index;
        struct cpu_freqs *freqs = all_freqs[policy->cpu];

        if (!freqs)
                return;

        index = cpufreq_times_get_index(freqs, new_freq);
        if (index >= 0)
                WRITE_ONCE(freqs->last_index, index);
}