/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __KVM_ARM_VGIC_MMIO_H__
#define __KVM_ARM_VGIC_MMIO_H__

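/*
 * A vgic_register_region describes one register (or a contiguous block of
 * registers) within a VGIC MMIO frame: where it lives (reg_offset, len),
 * which access widths it accepts (access_flags), and the handlers used for
 * guest MMIO accesses (read/write, or the its_* variants for the ITS) and
 * for userspace accesses through the KVM device API (uaccess_*).
 * bits_per_irq is non-zero for registers that hold per-IRQ state.
 */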
struct vgic_register_region {
        unsigned int reg_offset;
        unsigned int len;
        unsigned int bits_per_irq;
        unsigned int access_flags;
        union {
                unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
                                      unsigned int len);
                unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
                                          gpa_t addr, unsigned int len);
        };
        union {
                void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
                              unsigned int len, unsigned long val);
                void (*its_write)(struct kvm *kvm, struct vgic_its *its,
                                  gpa_t addr, unsigned int len,
                                  unsigned long val);
        };
        unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
                                      unsigned int len);
        union {
                int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
                                     unsigned int len, unsigned long val);
                int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
                                         gpa_t addr, unsigned int len,
                                         unsigned long val);
        };
};

extern struct kvm_io_device_ops kvm_io_gic_ops;

#define VGIC_ACCESS_8bit        1
#define VGIC_ACCESS_32bit       2
#define VGIC_ACCESS_64bit       4
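/*
 * access_flags may combine these, e.g. a byte-accessible 32-bit register
 * would use (VGIC_ACCESS_32bit | VGIC_ACCESS_8bit).
 */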

/*
 * Generate a mask that covers the number of bytes required to address
 * up to 1024 interrupts, each represented by <bits> bits. This assumes
 * that <bits> is a power of two.
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
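/*
 * For example (illustrative): enable registers use one bit per IRQ, so
 * VGIC_ADDR_IRQ_MASK(1) = (1 * 1024 / 8) - 1 = 0x7f, while priority
 * registers use eight bits per IRQ, giving (8 * 1024 / 8) - 1 = 0x3ff.
 */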

/*
 * (addr & mask) gives us the _byte_ offset for the INT ID.
 * We multiply this by 8 to get the _bit_ offset, then divide this by
 * the number of bits to learn the actual INT ID.
 * But instead of a division (which requires a "long long div" implementation),
 * we shift by the binary logarithm of <bits>.
 * This assumes that <bits> is a power of two.
 */
#define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
                                        8 >> ilog2(bits))
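/*
 * Worked example (illustrative): for the one-bit-per-IRQ enable registers,
 * a byte offset of 0x4 yields ((0x4 & 0x7f) * 8) >> ilog2(1) = 32, i.e. the
 * second 32-bit register covers INTIDs 32..63.
 */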

/*
 * Some VGIC registers store per-IRQ information, with a different number
 * of bits per IRQ. For those registers this macro is used.
 * The _WITH_LENGTH version instantiates registers with a fixed length
 * and is mutually exclusive with the _PER_IRQ version.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc) \
        {                                                               \
                .reg_offset = off,                                      \
                .bits_per_irq = bpi,                                    \
                .len = bpi * 1024 / 8,                                  \
                .access_flags = acc,                                    \
                .read = rd,                                             \
                .write = wr,                                            \
                .uaccess_read = ur,                                     \
                .uaccess_write = uw,                                    \
        }

#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)             \
        {                                                               \
                .reg_offset = off,                                      \
                .bits_per_irq = 0,                                      \
                .len = length,                                          \
                .access_flags = acc,                                    \
                .read = rd,                                             \
                .write = wr,                                            \
        }

#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
        {                                                               \
                .reg_offset = off,                                      \
                .bits_per_irq = 0,                                      \
                .len = length,                                          \
                .access_flags = acc,                                    \
                .read = rd,                                             \
                .write = wr,                                            \
                .uaccess_read = urd,                                    \
                .uaccess_write = uwr,                                   \
        }
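
/*
 * Illustrative sketch (not an actual table entry): a per-GIC register table
 * built from these descriptors might describe a hypothetical 4-byte RAZ/WI
 * register at offset 0x0008 as
 *
 *      REGISTER_DESC_WITH_LENGTH(0x0008,
 *              vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
 *              VGIC_ACCESS_32bit),
 *
 * while per-IRQ registers (enable, pending, priority, ...) use the
 * _BITS_PER_IRQ variant with the appropriate bits-per-IRQ value.
 */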

int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                  struct vgic_register_region *reg_desc,
                                  struct vgic_io_device *region,
                                  int nr_irqs, bool offset_private);

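/*
 * Helpers to convert between the byte buffer used by the kvm_io_bus MMIO
 * path and an unsigned long holding the (1, 2, 4 or 8 byte) register value.
 */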
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data);

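/*
 * Byte-level helpers for the 64-bit registers: extract_bytes() is assumed
 * here to return 'num' bytes of 'data' starting at byte 'offset', and
 * update_64bit_reg() to merge a partial write of 'len' bytes at byte
 * 'offset' into 'reg'; the defining C file holds the authoritative
 * semantics.
 */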
unsigned long extract_bytes(u64 data, unsigned int offset,
                            unsigned int num);

u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
                     unsigned long val);

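/*
 * Generic handlers: RAZ (read-as-zero), RAO (read-as-ones) and WI
 * (write-ignore), for registers or register ranges that are not backed by
 * any state.
 */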
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val);

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                               unsigned int len, unsigned long val);

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
                                   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
                           unsigned int len, unsigned long val);

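/*
 * Handlers for the per-IRQ state registers. The s/c prefixes follow the GIC
 * convention of separate set-* and clear-* registers: writing a 1 to a bit
 * in a set register (senable, spending, sactive) sets the corresponding
 * state, writing a 1 to a clear register (cenable, cpending, cactive)
 * clears it, and zero bits are ignored in both cases.
 */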
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len);

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val);

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val);

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val);

int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val);

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val);

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

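/*
 * Helpers to sanitise guest-written GICv3 base registers (e.g. PROPBASER/
 * PENDBASER style fields): vgic_sanitise_field() extracts the field at
 * field_shift/field_mask, runs sanitise_fn on it and inserts the result
 * back, while the cacheability/shareability helpers map unsupported
 * encodings onto architecturally valid ones.
 */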
u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
                        u64 (*sanitise_fn)(u64));

/* Find the proper register handler entry given a certain address offset */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset);

#endif