/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area, in bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that doing the lazy flush
 * seems to trigger bugs with some popular PCI cards, in particular 3ware
 * (but it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
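/*
 * Each GART PTE is 32 bits: bit 0 = valid, bit 1 = coherent, bits 12-31
 * carry physical address bits 12-31 and bits 4-11 carry physical address
 * bits 32-39 (40-bit physical addresses).  For example,
 * GPTE_ENCODE(0x1234567000) = 0x34567000 | (0x12 << 4) | 3 = 0x34567123.
 */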

#define to_pages(addr, size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
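/*
 * to_pages() counts how many 4K pages the byte range [addr, addr + size)
 * touches; e.g. a 0x20 byte buffer starting at page offset 0xff0 straddles
 * two pages: round_up(0xff0 + 0x20, 0x1000) >> 12 == 2.
 */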

#define EMERGENCY_PAGES 32      /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static int need_flush;          /* global flush state; set on each GART wrap */

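/*
 * Allocate 'size' contiguous GART pages from the remapping area, honoring
 * the device's DMA segment boundary via iommu_area_alloc().  Allocation is
 * next-fit starting at next_bit; wrapping back to the start (or running in
 * fullflush mode) flags a GART flush via need_flush.
 */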
static unsigned long alloc_iommu(struct device *dev, int size)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, 0);
        if (offset == -1) {
                need_flush = 1;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size, 0);
        }
        if (offset != -1) {
                set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset + size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)                                                     \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)                                                   \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = NULL;                       \
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude; dump some entries from the end of the table too */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk(KERN_DEBUG "%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
               "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
               size, dev->bus_id);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        if (force_iommu)
                mmu = 1;

        return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        return mmu;
}
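
/*
 * Example of the mask checks above: a device with a 32-bit DMA mask
 * (*dev->dma_mask == 0xffffffff) targeting a buffer at physical address
 * 5GB has addr + size > mask, so the buffer must be remapped through the
 * GART; with force_iommu set, need_iommu() remaps everything.
 */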

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(dev, npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

        flush_gart();

        return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
        unsigned long phys_mem, bus;

        if (!dev)
                dev = &fallback_dev;

        phys_mem = virt_to_phys(addr);
        if (!need_iommu(dev, phys_mem, size))
                return phys_mem;

        bus = gart_map_simple(dev, addr, size, dir);

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
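/*
 * The loop below batches consecutive entries that can share one GART
 * allocation: merging is only possible while the previous chunk ends on a
 * page boundary, the next one starts at offset 0 and the accumulated
 * length stays within the device's maximum segment size.  Whenever the
 * run is broken the batch is handed to dma_map_cont(), and the final run
 * is mapped after the loop.
 */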
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += to_pages(s->offset, s->length);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}

static int no_agp;

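/*
 * Size the remapping window: unless overridden on the command line (see
 * gart_parse_options()), the IOMMU gets the whole aperture, or half of it
 * when the AGP driver also uses the aperture; anything below 64MB triggers
 * a warning to enlarge the aperture in the BIOS.
 */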
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                       "PCI-DMA: Warning: Small IOMMU %luMB."
                       " Consider increasing the AGP aperture in BIOS\n",
                       iommu_size >> 20);
        }

        return iommu_size;
}

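/*
 * Read the aperture location from the northbridge: config register 0x90
 * bits 3:1 hold the aperture order (size = 32MB << order, e.g. order 3 is
 * a 256MB aperture) and register 0x94 holds the base in 32MB units.
 */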
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, 0x94, &aper_base_32);
        pci_read_config_dword(dev, 0x90, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 gatt_reg;
                u32 ctl;

                dev = k8_northbridges[i];
                gatt_reg = __pa(gatt) >> 12;
                gatt_reg <<= 4;
                pci_write_config_dword(dev, 0x98, gatt_reg);
                pci_read_config_dword(dev, 0x90, &ctl);

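                /*
                 * Bit 0 of the aperture control register enables the GART;
                 * bits 4 and 5 (presumably the DisGartCpu/DisGartIo bits
                 * described in the K8 BKDG) are cleared so both CPU and
                 * I/O accesses get translated through the aperture.
                 */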
                ctl |= 1;
                ctl &= ~((1<<4) | (1<<5));

                pci_write_config_dword(dev, 0x90, ctl);
        }
        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);
        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
        return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
        .mapping_error                  = NULL,
        .map_single                     = gart_map_single,
        .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
        .sync_single_for_cpu            = NULL,
        .sync_single_for_device         = NULL,
        .sync_single_range_for_cpu      = NULL,
        .sync_single_range_for_device   = NULL,
        .sync_sg_for_cpu                = NULL,
        .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl &= ~1;

                pci_write_config_dword(dev, 0x90, ctl);
        }
}

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_size;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
                printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
                return;
        }

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                 (agp_amd64_init() < 0) ||
                 (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (end_pfn > MAX_DMA32_PFN) {
                        printk(KERN_ERR "WARNING more than 4GB of memory "
                               "but GART IOMMU not available.\n"
                               KERN_ERR "WARNING 32bit PCI may malfunction.\n");
                }
                return;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * 8);
                else
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
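        /*
         * The remapping window is carved out of the top iommu_size bytes
         * of the aperture, so iommu_bus_base is its first bus address and
         * also doubles as bad_dma_address; the matching GATT entries start
         * at iommu_gatt_base.
         */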

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then.
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

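/*
 * Options recognized below (partly duplicating pci-dma.c):
 *   leak[=pages]    - enable leak tracing (CONFIG_IOMMU_LEAK only)
 *   <number>        - set iommu_size
 *   fullflush       - set iommu_fullflush, nofullflush clears it
 *   noagp           - set no_agp
 *   noaperture      - clear fix_aperture
 *   force, allowed  - set gart_iommu_aperture_allowed
 *   memaper[=order] - set fallback_aper_force / fallback_aper_order
 */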
void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=') ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}