// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * A note on terminology: Linux calls an uncached area coherent, while MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition. However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;
        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}
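
/*
 * Illustrative timeline (not from the original file) of the hazard that
 * the post-DMA flush closes on the CPUs above:
 *
 *   1. The CPU invalidates the buffer's cachelines and starts a
 *      device-to-memory DMA transfer.
 *   2. A speculative access refills one of those lines with stale data.
 *   3. The device writes fresh data to memory; the stale cacheline now
 *      masks it from the CPU.
 *   4. arch_sync_dma_for_cpu() below invalidates again after the
 *      transfer, so the CPU sees the device's data.
 */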

void arch_dma_prep_coherent(struct page *page, size_t size)
{
        dma_cache_wback_inv((unsigned long)page_address(page), size);
}

void *uncached_kernel_address(void *addr)
{
        return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
        return __va(addr) - UNCAC_BASE;
}
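
/*
 * Worked example for the two helpers above (hypothetical values, assuming
 * a 32-bit kernel where the cached direct map is KSEG0 at 0x80000000 and
 * UNCAC_BASE is KSEG1 at 0xa0000000):
 *
 *   cached KSEG0 address:      0x80001000, so __pa() gives 0x00001000
 *   uncached_kernel_address(): 0x00001000 + 0xa0000000 = 0xa0001000
 *
 * cached_kernel_address() inverts this: __va(0xa0001000) - UNCAC_BASE
 * arrives back at 0x80001000, the cached KSEG0 alias of the same
 * physical page.
 */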

static inline void dma_sync_virt(void *addr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous pages. But
 * we still need to process highmem pages individually. If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long offset = paddr & ~PAGE_MASK;
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE)
                                len = PAGE_SIZE - offset;

                        addr = kmap_atomic(page);
                        dma_sync_virt(addr + offset, len, dir);
                        kunmap_atomic(addr);
                } else
                        /*
                         * Lowmem pages are virtually contiguous in the
                         * direct map, so the remaining bytes can be
                         * synced in a single call.
                         */
                        dma_sync_virt(page_address(page) + offset, left, dir);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}
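
/*
 * Worked example for dma_sync_phys() (hypothetical values, 4 KiB pages):
 * a 6 KiB buffer starting 1 KiB into a highmem page. Iteration 1 syncs
 * 3 KiB (PAGE_SIZE - offset) through a kmap_atomic() mapping; iteration 2
 * moves to the next page, resets the offset to 0, and syncs the
 * remaining 3 KiB.
 */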

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        dma_sync_phys(paddr, size, dir);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        if (cpu_needs_post_dma_flush())
                dma_sync_phys(paddr, size, dir);
}
#endif
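
/*
 * Hedged usage sketch (not part of the original file): how a driver's
 * streaming-DMA calls reach the two hooks above. dma_map_single() with
 * DMA_FROM_DEVICE ends up in arch_sync_dma_for_device(), and
 * dma_unmap_single() ends up in arch_sync_dma_for_cpu(), which only does
 * work when cpu_needs_post_dma_flush() is true. The function, device,
 * and buffer below are hypothetical.
 */
static void __maybe_unused example_dma_rx(struct device *dev, void *buf,
                size_t len)
{
        dma_addr_t handle;

        /* Invalidate stale cachelines covering 'buf' before the transfer. */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... program the device to DMA into 'handle' and wait ... */

        /* The post-DMA invalidate on speculating CPUs happens here. */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

        /* CPU loads from 'buf' now observe the device's data. */
}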

void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        dma_sync_virt(vaddr, size, direction);
}
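
/*
 * Hedged sketch (not part of the original file): arch_dma_cache_sync()
 * backs the dma_cache_sync() API used with non-consistent allocations.
 * The function name and sizes below are hypothetical.
 */
static void __maybe_unused example_non_consistent(struct device *dev)
{
        dma_addr_t handle;
        void *vaddr;

        vaddr = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL,
                                DMA_ATTR_NON_CONSISTENT);
        if (!vaddr)
                return;

        /* CPU fills the buffer, then flushes it out to memory. */
        memset(vaddr, 0xff, PAGE_SIZE);
        dma_cache_sync(dev, vaddr, PAGE_SIZE, DMA_TO_DEVICE);

        /* ... device reads from 'handle' ... */

        dma_free_attrs(dev, PAGE_SIZE, vaddr, handle,
                        DMA_ATTR_NON_CONSISTENT);
}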

#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent)
{
        dev->dma_coherent = coherent;
}
#endif