// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/cache.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

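/*
 * Apply a cache maintenance routine to the physical range
 * [paddr, paddr + size), page by page.  Lowmem pages are reached
 * through the kernel linear mapping; highmem pages have no permanent
 * kernel mapping and are mapped temporarily with kmap_atomic().
 */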
static inline void cache_op(phys_addr_t paddr, size_t size,
		void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

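		/*
		 * Highmem pages are not part of the linear mapping, so
		 * each one must be mapped before the cache routine can
		 * operate on a kernel virtual address.
		 */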
		if (PageHighMem(page)) {
			void *addr;

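			/*
			 * kmap_atomic() provides a single-page mapping,
			 * so clamp this step to the end of the current
			 * page; the rest is handled on later iterations.
			 */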
			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			/* Lowmem is covered by the kernel linear mapping. */
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + len);
		}
		offset = 0;
		page++;
		/*
		 * Keep paddr in step with page so the lowmem path stays
		 * correct even when it is reached after a highmem page.
		 */
		paddr += len;
		left -= len;
	} while (left);
}

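/*
 * Called before a buffer is handed to a device: write back dirty
 * cache lines so the device reads up-to-date data from memory.
 * DMA_FROM_DEVICE needs nothing here; the CPU side is invalidated
 * in arch_sync_dma_for_cpu() once the device has written the data.
 */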
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_wb_range);
		break;
	default:
		BUG();
	}
}

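/*
 * Called after the device has finished with the buffer: invalidate
 * any stale cache lines so subsequent CPU reads fetch the DMA'd data
 * from memory.  Nothing to do for DMA_TO_DEVICE.
 */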
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_inval_range);
		break;
	default:
		BUG();
	}
}

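/*
 * Prepare a page allocated for a coherent mapping: write back and
 * invalidate its cache lines so no dirty data can later be evicted
 * over the buffer while it is used uncached.
 */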
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
}