/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        int need_coh = 1, need_kvaddr = 0;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        if (attrs & DMA_ATTR_NON_CONSISTENT)
                need_coh = 0;

        /*
         * - A coherent buffer needs an MMU mapping to enforce non-cacheability
         * - A highmem page needs a virtual handle (hence MMU mapping),
         *   independent of cacheability
         */
        if (PageHighMem(page) || need_coh)
                need_kvaddr = 1;

        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = paddr;

        /* This is kernel Virtual address (0x7000_0000 based) */
        if (need_kvaddr) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although core does call flush_cache_vmap(), it gets kvaddr hence
         * can't be used to efficiently flush L1 and/or L2 which need paddr
         * Currently flush_cache_vmap nukes the L1 cache completely which
         * will be optimized as a separate commit
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
        int is_non_coh = 1;

        is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT);

        if (PageHighMem(page) || !is_non_coh)
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}
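
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver would reach arch_dma_alloc()/arch_dma_free() above through the
 * generic DMA API. Names such as "my_dev" and "my_buf" are made up.
 *
 *      dma_addr_t dma_handle;
 *      void *my_buf;
 *
 *      my_buf = dma_alloc_coherent(my_dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *      if (!my_buf)
 *              return -ENOMEM;
 *
 *      // program the device with dma_handle, run the transfer ...
 *
 *      dma_free_coherent(my_dev, SZ_4K, my_buf, dma_handle);
 *
 * Using dma_alloc_attrs() with DMA_ATTR_NON_CONSISTENT instead would skip
 * the uncached ioremap above (need_coh == 0), leaving cache maintenance to
 * the caller.
 */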

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = __phys_to_pfn(dma_addr);
        unsigned long off = vma->vm_pgoff;
        int ret = -ENXIO;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}
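
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * would not call arch_dma_mmap() directly, but export the buffer to user
 * space via the generic helper, e.g. from its .mmap file operation
 * ("my_dev", "my_buf", "my_dma_handle" and "my_size" are made up):
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_coherent(my_dev, vma, my_buf, my_dma_handle,
 *                                       my_size);
 *      }
 */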

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */
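
/*
 * Illustrative note (not part of the original file): with the generic
 * dma-noncoherent code, the usual streaming DMA API calls end up in the two
 * hooks below roughly as follows:
 *
 *      dma_map_single() / dma_map_page()       -> arch_sync_dma_for_device()
 *      dma_unmap_single() / dma_unmap_page()   -> arch_sync_dma_for_cpu()
 *      dma_sync_single_for_device()            -> arch_sync_dma_for_device()
 *      dma_sync_single_for_cpu()               -> arch_sync_dma_for_cpu()
 */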

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;

        default:
                break;
        }
}
154
Christoph Hellwig6c3e71d2018-05-18 15:41:32 +0200155void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
156 size_t size, enum dma_data_direction dir)
Christoph Hellwig713a7462018-05-18 15:14:28 +0200157{
Eugeniy Paltsev4c612ad2018-07-24 17:13:02 +0300158 switch (dir) {
159 case DMA_TO_DEVICE:
160 break;
161
162 /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
163 case DMA_FROM_DEVICE:
164 case DMA_BIDIRECTIONAL:
165 dma_cache_inv(paddr, size);
166 break;
167
168 default:
169 break;
170 }
Christoph Hellwig713a7462018-05-18 15:14:28 +0200171}
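
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * receive path that exercises the hooks above ("my_dev", "rx_buf" and "len"
 * are made up; error handling trimmed):
 *
 *      dma_addr_t addr;
 *
 *      addr = dma_map_single(my_dev, rx_buf, len, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(my_dev, addr))
 *              return -ENOMEM;
 *
 *      // hand "addr" to the device and wait for the DMA to complete
 *
 *      dma_unmap_single(my_dev, addr, len, DMA_FROM_DEVICE);
 *      // rx_buf is now safe to read from the CPU
 */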

/*
 * Plug in coherent or noncoherent dma ops
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
         * with memory - eliding need for any explicit cache maintenance of
         * DMA buffers - so we can use dma_direct cache ops.
         */
        if (is_isa_arcv2() && ioc_enable && coherent) {
                set_dma_ops(dev, &dma_direct_ops);
                dev_info(dev, "use dma_direct_ops cache ops\n");
        } else {
                set_dma_ops(dev, &dma_noncoherent_ops);
                dev_info(dev, "use dma_noncoherent_ops cache ops\n");
        }
}
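
/*
 * Illustrative note (not part of the original file): the "coherent" argument
 * above typically originates from the device tree, where a node carries the
 * standard "dma-coherent" property that of_dma_configure() turns into this
 * flag. A made-up example node:
 *
 *      my_peripheral@f0000000 {
 *              compatible = "vendor,example-dma-device";
 *              reg = <0xf0000000 0x1000>;
 *              dma-coherent;
 *      };
 */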