#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

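/*
 * Per-IRQ bookkeeping for interrupt remapping: the IOMMU that remaps
 * this IRQ, the base index of its interrupt remap table entry (IRTE),
 * the sub-handle offset within a multi-entry allocation, and the mask
 * (log2 of the allocation size) used when flushing the entry cache.
 */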
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

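/* Serializes the IRQ <-> IRTE mapping and the remap-table updates below. */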
static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

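/*
 * Copy the live interrupt remap table entry that backs @irq into
 * @entry.  Returns 0 on success, -1 if the IRQ is not remapped.
 */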
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

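/*
 * Reserve a naturally aligned run of IRTEs for @irq and return the base
 * index, or -1 on failure.  For @count > 1 (multi-vector MSI), the
 * request is rounded up to a power of two and irte_mask is set to its
 * log2; e.g. count = 5 becomes 8 entries with mask = 3.  That mask is
 * later handed to qi_flush_iec() to invalidate the whole run at once.
 */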
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

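/*
 * Queue a selective interrupt entry cache (IEC) invalidation for the
 * 2^mask IRTEs starting at @index and wait for it to complete.
 */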
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

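/*
 * Return the base IRTE index for @irq and store its sub-handle in
 * @sub_handle, or return -1 if the IRQ is not remapped.
 */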
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

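/*
 * Bind @irq to slot @index + @subhandle of an IRTE run that was already
 * reserved, typically via alloc_irte() on the first IRQ of a
 * multi-vector MSI block.  A plausible caller pattern, shown only as a
 * sketch since the real MSI plumbing lives elsewhere:
 *
 *      index = alloc_irte(iommu, irq, nvec);
 *      for (i = 1; i < nvec; i++)
 *              set_irte_irq(irq + i, iommu, index, i);
 */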
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

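/* Drop the IRQ's association with its IRTE without touching the table. */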
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

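/*
 * Program a new value for the IRTE backing @irq: the low 64 bits of the
 * entry are updated with an atomic 64-bit store, the line is flushed
 * for non-coherent IOMMUs, and the interrupt entry cache entry is then
 * invalidated.  Returns the result of the invalidation.
 */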
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

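/* Invalidate the cached IRTE(s) for @irq without changing the table. */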
int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

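/*
 * Release the IRTE(s) backing @irq.  Only the owner of the allocation
 * (sub_handle == 0) clears the table entries and flushes the entry
 * cache; sub-handle users merely drop their mapping.
 */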
int free_irte(int irq)
{
        int rc = 0;
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

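/*
 * Point the IOMMU at the remap table and enable remapping.  The
 * programming order follows the VT-d sequence: write DMAR_IRTA_REG,
 * latch it with GCMD.SIRTP, globally invalidate the interrupt entry
 * cache, and only then set GCMD.IRE, polling DMAR_GSTS_REG after each
 * step.
 */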
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}


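/*
 * Allocate the interrupt remap table for @iommu and switch the hardware
 * over to it.  The GFP_ATOMIC allocations suggest callers may run with
 * interrupts disabled.
 */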
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

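/*
 * Enable interrupt remapping on every DRHD unit.  The passes below run
 * in a fixed order: quiesce state left over from the previous kernel
 * (kexec/OS handover), verify EIM support if extended (x2APIC) mode
 * was requested, enable queued invalidation everywhere, and finally
 * set up the remap tables.
 */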
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * we must not disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

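/*
 * Record which IOMMU covers each IO-APIC by walking the device scope
 * entries of a DRHD's ACPI description.
 */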
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

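/*
 * Re-enable interrupt remapping after it was disabled, e.g. across
 * suspend/resume: queued invalidation is restarted first, then each
 * capable IOMMU is pointed back at its still-allocated remap table.
 */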
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}