// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INTEL_IOMMU

#include <linux/kernel.h>

#include <asm/page.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);
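
/*
 * When set, devices with a wide DMA mask are still steered to
 * single-address-cycle (32-bit) addressing; see iommu_dma_supported().
 */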
static int iommu_sac_force __read_mostly;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

int iommu_pass_through;

extern struct dma_map_ops intel_dma_ops;
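
/*
 * Late (fs_initcall) hook: if an IOMMU was detected earlier via
 * detect_intel_iommu(), finish its setup once the PCI subsystem is ready.
 */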
static int __init pci_iommu_init(void)
{
	if (iommu_detected)
		intel_iommu_init();

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
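
/* Stub: no IOMMU-specific work is done at shutdown. */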
void pci_iommu_shutdown(void)
{
	return;
}
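
/* Stub as well; the real setup happens in pci_iommu_alloc() below. */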
void __init
iommu_dma_init(void)
{
	return;
}
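
/*
 * dma_supported() hook for intel_dma_ops: return 1 if the device can be
 * handled with the given DMA mask, 0 if it should retry with a smaller one.
 */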
int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * Copied from i386.  Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.
	 * The caller just has to use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * Problem with this is that if we overflow the IOMMU area and
	 * return DAC as fallback address the device may not handle it
	 * correctly.
	 *
	 * As a special case some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx).  Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this
	 * type.  Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %llx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);
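
/*
 * Install intel_dma_ops as the global dma_ops, point its sync hooks at the
 * ia64 machvec implementations, then run IOMMU detection (and swiotlb
 * setup when CONFIG_SWIOTLB is enabled).
 */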
void __init pci_iommu_alloc(void)
{
	dma_ops = &intel_dma_ops;

	intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
	intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
	intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
	intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
	intel_dma_ops.dma_supported = iommu_dma_supported;

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

#endif /* CONFIG_INTEL_IOMMU */