/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

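/*
 * Pick which timers run in hardware and which are emulated: with VHE both
 * EL1 timers can be handed to the guest directly, while on non-VHE only the
 * virtual timer is hardware-backed and the physical timer is emulated with
 * an hrtimer.
 */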
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

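/* IRQ handler for the per-CPU host timer interrupts requested in kvm_timer_hyp_init() */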
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

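/*
 * Returns the number of nanoseconds until the timer's CVAL is reached, or
 * 0 if it has already expired (in which case the interrupt can fire).
 */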
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		!(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

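/*
 * Expiry handler for the per-vcpu background timer armed while the VCPU is
 * blocked: wake the VCPU up so the pending timer interrupt can be delivered.
 */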
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

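/*
 * Expiry handler for the per-context emulation hrtimer: raise the timer
 * interrupt line once the emulated timer has genuinely expired.
 */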
static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

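/*
 * Compute the timer's output level: read CTL straight from the hardware if
 * the context is currently loaded on this CPU, otherwise derive it from the
 * shadow CVAL/CTL state.
 */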
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(cntv_ctl);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(cntp_ctl);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

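/*
 * Emulate a timer in software: assert the interrupt if it should fire now,
 * otherwise (re)arm or cancel the soft timer backing it.
 */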
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire) {
		kvm_timer_update_irq(ctx->vcpu, true, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

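/*
 * Save the hardware state of a directly-used timer into its shadow context
 * and disable the hardware timer so it cannot fire while we are away.
 */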
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		ctx->cnt_ctl = read_sysreg_el0(cntv_ctl);
		ctx->cnt_cval = read_sysreg_el0(cntv_cval);

		/* Disable the timer */
		write_sysreg_el0(0, cntv_ctl);
		isb();

		break;
	case TIMER_PTIMER:
		ctx->cnt_ctl = read_sysreg_el0(cntp_ctl);
		ctx->cnt_cval = read_sysreg_el0(cntp_cval);

		/* Disable the timer */
		write_sysreg_el0(0, cntp_ctl);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

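/*
 * Load the shadow context back into the hardware timer; the ISB orders the
 * CVAL write before the CTL write so the timer is not briefly enabled with
 * a stale compare value.
 */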
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(ctx->cnt_cval, cntv_cval);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, cntv_ctl);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(ctx->cnt_cval, cntp_cval);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	u32 low = lower_32_bits(cntvoff);
	u32 high = upper_32_bits(cntvoff);

	/*
	 * Since kvm_call_hyp doesn't fully support the ARM PCS, especially on
	 * 32-bit systems, and instead passes arguments register by register
	 * shifted one place (the function address goes in r0/x0), we cannot
	 * simply pass a 64-bit value as an argument; we have to split it into
	 * two 32-bit halves.
	 */
	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress.  Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

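/*
 * Called on every entry to the VCPU thread: sync the interrupt lines,
 * program CNTVOFF, cancel the blocking background timer and hand the
 * direct timers back to the hardware.
 */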
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(map.direct_vtimer->cntvoff);

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so no need to zero CNTVOFF_EL2.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

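/*
 * Reset both EL1 timers to their disabled, unmasked state, lower the
 * interrupt lines and cancel any pending soft-timer emulation.
 */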
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	vcpu_vtimer(vcpu)->cnt_ctl = 0;
	vcpu_ptimer(vcpu)->cnt_ctl = 0;

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		vcpu_vtimer(tmp)->cntvoff = cntvoff;

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
	mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	ptimer->cntvoff = 0;

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	if (!kvm_timer_compute_delta(timer))
		return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
	else
		return timer->cnt_ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer->cnt_cval;
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer->cntvoff;
		break;

	default:
		BUG();
	}

	return val;
}

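/*
 * Read a timer register with up-to-date state: put the timer first so the
 * in-memory context is current, read the register, then load the timer
 * again. Preemption stays disabled across the put/load pair.
 */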
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
		break;

	case TIMER_REG_CTL:
		timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
		break;

	case TIMER_REG_CVAL:
		timer->cnt_cval = val;
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	/* First, do the virtual EL1 timer irq */

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
		if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
		    host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
			kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
				host_ptimer_irq);
			host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
		}

		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

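/*
 * Claim ownership of both timer PPIs in the vgic and check that all VCPUs
 * agree on the configured interrupt numbers.
 */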
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq;
	int i, ret;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

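/*
 * Finalize the timer setup for this VCPU: with an in-kernel irqchip, wire
 * each directly-used timer's host interrupt to the corresponding guest PPI
 * via kvm_vgic_map_phys_irq().
 */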
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    kvm_arch_timer_get_input_level);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    kvm_arch_timer_get_input_level);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On VHE systems, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, and this makes those
 * bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}