// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

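/*
 * Called by the generic DMA code when it sets up a "coherent" buffer on
 * this non-coherent hardware, i.e. before the buffer is handed out (and
 * possibly remapped) as uncached memory.
 */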
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap() nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

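/*
 * To illustrate the table above with the common streaming-DMA calls (an
 * illustrative sketch of the generic dma-direct flow, not ARC specific):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into buf ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * The map step ends up in arch_sync_dma_for_device() (invalidate) and the
 * unmap step in arch_sync_dma_for_cpu() (invalidate again, to discard any
 * lines the CPU speculatively prefetched while the device owned the buffer).
 */
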
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in direct dma map ops.
 */
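/*
 * Note: this hook is typically reached from the OF/DT setup path at device
 * probe time (of_dma_configure()), with @coherent reflecting the device's
 * "dma-coherent" property.
 */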
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %sncoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}

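/*
 * Pre-populate the pool backing atomic (non-blocking) coherent allocations;
 * it is mapped non-cached, so buffers handed out from it need no further
 * cache maintenance.
 */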
static int __init atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);