// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with QLogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

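/*
 * Worked example (illustrative values only): encoding the 40-bit physical
 * address 0x1234567000 keeps bits 12-31 in place and folds bits 32-39
 * into bits 4-11, so GPTE_ENCODE(0x1234567000ULL) yields
 * 0x34567000 | (0x12 << 4) | GPTE_VALID | GPTE_COHERENT == 0x34567123,
 * and GPTE_DECODE(0x34567123) recovers 0x1234567000.
 */
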
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
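	/*
	 * Advance next_bit past the just-freed range (assumed rationale:
	 * this keeps the range from being handed out again before the
	 * wraparound in alloc_iommu() forces a TLB flush).
	 */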
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL, KERN_ERR);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

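/*
 * Worked example (illustrative): mapping size 0x2000 at physical address
 * 0x10000200 spans three GART pages, since iommu_num_pages() rounds the
 * 0x200 intra-page offset into the count; the returned bus address keeps
 * that 0x200 offset because the per-page increments of phys_mem leave
 * the low bits untouched.
 */
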
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

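/*
 * Drivers never call gart_map_page() directly; it is reached through the
 * generic DMA API once gart_dma_ops is installed. A minimal sketch
 * (hypothetical device pointer):
 *
 *	dma_addr_t bus = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		return -ENOMEM;
 */
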
/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first one. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -ENOMEM;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start, ret;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return -EINVAL;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				ret = dma_map_cont(dev, start_sg, i - start,
						   sgmap, pages, need);
				if (ret < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
	if (ret < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	return ret;
}

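/*
 * As with gart_map_page(), callers arrive here via the DMA API. A minimal
 * sketch (hypothetical names):
 *
 *	int n = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	// program the device using sg_dma_address()/sg_dma_len()
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
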
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

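/*
 * Illustrative decoding (assumed reading of the register layout): an
 * AMD64_GARTAPERTURECTL value whose bits 1-3 read 2 encodes a
 * 32MB << 2 = 128MB aperture, and the base register stores physical
 * address bits 39-25, hence the << 25 above.
 */
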
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

nommu:
	/* Should not happen anymore */
	pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
	return -1;
}

static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mmap				= dma_common_mmap,
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
	.get_required_mask		= dma_direct_get_required_mask,
	.alloc_pages			= dma_direct_alloc_pages,
	.free_pages			= dma_direct_free_pages,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warn("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
				    PAGE_KERNEL);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

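/*
 * Parse GART-specific options from the iommu= kernel parameter, e.g.
 * (illustrative) "iommu=memaper=2,fullflush" to force a fallback
 * aperture of order 2 and flush the GART TLB on every mapping.
 */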
void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);