/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

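/*
 * Describe how each guest timer is realised on this host: with VHE both
 * EL1 timers are backed directly by hardware timers, while without VHE
 * only the virtual timer is direct and the physical timer is emulated
 * with a hrtimer.
 */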
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
	       unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

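/*
 * Host handler for the per-cpu timer hardware interrupts. It runs when a
 * directly mapped guest timer fires while its vcpu state is loaded on
 * this CPU, and raises the corresponding guest timer interrupt line.
 */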
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

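/*
 * Return the number of nanoseconds until the timer's compare value is
 * reached, from the guest's point of view (i.e. with cntvoff applied),
 * or 0 if the timer has already expired.
 */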
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
	       !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
	       (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

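/*
 * Compute whether the timer's output line is (or should be) asserted. If
 * the timer state is currently loaded on the CPU, consult the hardware
 * control register; otherwise use the saved software view.
 */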
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(cntv_ctl);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(cntp_ctl);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		       (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;

	return cval <= now;
}

bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);

	return kvm_timer_should_fire(map.direct_vtimer) ||
	       kvm_timer_should_fire(map.direct_ptimer) ||
	       kvm_timer_should_fire(map.emul_ptimer);
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

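/*
 * Emulate a timer that has no hardware backing: raise its interrupt line
 * immediately if it should already have fired, otherwise (re)arm a
 * hrtimer for the next expiry, if any.
 */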
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire) {
		kvm_timer_update_irq(ctx->vcpu, true, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future. If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (!kvm_timer_irq_can_fire(ctx)) {
		soft_timer_cancel(&ctx->hrtimer);
		return;
	}

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

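/*
 * Move a directly mapped timer from the CPU back into its in-memory
 * context: snapshot the control and compare registers, then disable the
 * hardware timer so it cannot fire while the host runs.
 */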
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		ctx->cnt_ctl = read_sysreg_el0(cntv_ctl);
		ctx->cnt_cval = read_sysreg_el0(cntv_cval);

		/* Disable the timer */
		write_sysreg_el0(0, cntv_ctl);
		isb();

		break;
	case TIMER_PTIMER:
		ctx->cnt_ctl = read_sysreg_el0(cntp_ctl);
		ctx->cnt_cval = read_sysreg_el0(cntp_cval);

		/* Disable the timer */
		write_sysreg_el0(0, cntp_ctl);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer
	 * set to the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

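/*
 * Load a timer context onto the CPU. The compare value is written (and
 * made visible with an isb) before the control register, so the timer
 * cannot be observed as enabled with a stale compare value.
 */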
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		write_sysreg_el0(ctx->cnt_cval, cntv_cval);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, cntv_ctl);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(ctx->cnt_cval, cntp_cval);
		isb();
		write_sysreg_el0(ctx->cnt_ctl, cntp_ctl);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static void set_cntvoff(u64 cntvoff)
{
	u32 low = lower_32_bits(cntvoff);
	u32 high = upper_32_bits(cntvoff);

	/*
	 * kvm_call_hyp doesn't fully support the ARM PCS, especially on
	 * 32-bit systems: it passes arguments register by register,
	 * shifted one place (the function address goes in r0/x0). We
	 * therefore cannot pass a 64-bit value as a single argument and
	 * have to split it into two 32-bit halves.
	 */
	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress. Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

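/*
 * Called when the vcpu is loaded on a physical CPU: synchronise the
 * interrupt lines, program the virtual counter offset, restore the
 * directly mapped timer contexts onto the CPU, and kick emulation for
 * any purely software-emulated timer.
 */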
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	set_cntvoff(map.direct_vtimer->cntvoff);

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

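/*
 * Counterpart to kvm_timer_vcpu_load(): save the directly mapped timer
 * state back into memory, stop any pending ptimer emulation, and arm the
 * background timer if the vcpu is about to block.
 */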
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
		kvm_timer_blocking(vcpu);

	/*
	 * The kernel may decide to run userspace after calling vcpu_put, so
	 * we reset cntvoff to 0 to ensure a consistent read between user
	 * accesses to the virtual counter and kernel access to the physical
	 * counter in the non-VHE case. For VHE, the virtual counter uses a
	 * fixed virtual offset of zero, so no need to zero CNTVOFF_EL2.
	 */
	set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	vcpu_vtimer(vcpu)->cnt_ctl = 0;
	vcpu_ptimer(vcpu)->cnt_ctl = 0;

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		vcpu_vtimer(tmp)->cntvoff = cntvoff;

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
	mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	ptimer->cntvoff = 0;

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;

	vtimer->vcpu = vcpu;
	ptimer->vcpu = vcpu;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	if (!kvm_timer_compute_delta(timer))
		return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
	else
		return timer->cnt_ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		/* The ptimer's cntvoff is 0, so this reads the physical counter */
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer->cnt_cval;
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer->cntvoff;
		break;

	default:
		BUG();
	}

	return val;
}

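/*
 * Read a timer register as the guest would see it. The vcpu timer state
 * is saved off the CPU first so that the in-memory copy is current, and
 * reloaded afterwards; preemption is disabled across the sequence.
 */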
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();

	return val;
}

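/*
 * Update the saved (not loaded) view of a timer register. Note that the
 * ISTATUS bit is read-only and is therefore stripped from CTL writes.
 */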
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
		break;

	case TIMER_REG_CTL:
		timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
		break;

	case TIMER_REG_CVAL:
		timer->cnt_cval = val;
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	preempt_disable();
	kvm_timer_vcpu_put(vcpu);

	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

	kvm_timer_vcpu_load(vcpu);
	preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}

int kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	/* First, do the virtual EL1 timer irq */

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

Christoffer Dall | 9e01dc7 | 2019-02-19 14:04:30 +0100 | [diff] [blame] | 957 | /* Now let's do the physical EL1 timer irq */ |
| 958 | |
| 959 | if (info->physical_irq > 0) { |
| 960 | host_ptimer_irq = info->physical_irq; |
| 961 | host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq); |
| 962 | if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH && |
| 963 | host_ptimer_irq_flags != IRQF_TRIGGER_LOW) { |
| 964 | kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n", |
| 965 | host_ptimer_irq); |
| 966 | host_ptimer_irq_flags = IRQF_TRIGGER_LOW; |
| 967 | } |
| 968 | |
| 969 | err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler, |
| 970 | "kvm guest ptimer", kvm_get_running_vcpus()); |
| 971 | if (err) { |
| 972 | kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n", |
| 973 | host_ptimer_irq, err); |
			/* don't leak the previously requested vtimer IRQ */
			goto out_free_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq;
	int i, ret;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}
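
/*
 * Two checks are folded together above: kvm_vgic_set_owner() claims each
 * PPI for its timer context and fails if another in-kernel user already
 * owns that interrupt, and the kvm_for_each_vcpu() loop verifies that
 * userspace configured every vcpu with the same pair of interrupt
 * numbers.
 */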

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}
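
/*
 * Note that kvm_arch_timer_get_input_level() is not called from the
 * timer code itself; it is passed to kvm_vgic_map_phys_irq() in
 * kvm_timer_enable() below, so the vgic can sample the emulated timer's
 * output line whenever it needs to resync a mapped interrupt.
 */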

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    kvm_arch_timer_get_input_level);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    kvm_arch_timer_get_input_level);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On VHE systems, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
 * bits have no effect for host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}
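
/*
 * To make the shift above concrete: with HCR_EL2.E2H == 0, EL1PCTEN is
 * CNTHCTL_EL2 bit 0 and EL1PCEN is bit 1. With E2H == 1 the register
 * layout changes and the equivalent enable bits live at bits 10 and 11,
 * which is why each mask is shifted left by 10 before being OR'ed in.
 */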

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}
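
/*
 * A minimal sketch of the userspace side of these accessors (the
 * attribute group and ioctl are part of the KVM UAPI, but the
 * surrounding code is an assumed, illustrative VMM snippet):
 *
 *	int irq = 27;			// must be a PPI (16..31)
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_TIMER_CTRL,
 *		.attr	= KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
 *		.addr	= (u64)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// before the vcpu runs
 */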

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}