// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

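/*
 * Called by the generic DMA layer on the backing pages of a coherent
 * allocation before they are handed out (and possibly remapped uncached),
 * so that no stale cached copies of those lines survive.
 */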
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

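/*
 * Hand the buffer back to the CPU: any lines the CPU may have (speculatively)
 * pulled in while the device owned the buffer are stale once the device has
 * written to memory, so drop them (FROM_DEV/BIDIR rows of the table above).
 */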
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

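/*
 * Rough call flow for a streaming mapping on a non-coherent device (a
 * sketch, assuming the generic dma-direct paths; the driver-side buffer
 * and handle names are illustrative only):
 *
 *	dma_map_single(dev, buf, len, DMA_FROM_DEVICE)
 *	    -> arch_sync_dma_for_device(dev, paddr, len, DMA_FROM_DEVICE)
 *	... device DMAs into the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE)
 *	    -> arch_sync_dma_for_cpu(dev, paddr, len, DMA_FROM_DEVICE)
 */
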
/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}

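/*
 * Reserve an uncached pool at boot so coherent allocations from atomic
 * context can be satisfied without having to remap pages at allocation time.
 */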
static int __init atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);