#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>

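/*
 * Return the DMA ops to use for @dev: prefer the ops saved in
 * dev->archdata.dev_dma_ops, if any, otherwise fall back to the
 * architecture default ops.
 */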
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dev_dma_ops)
		return dev->archdata.dev_dma_ops;
	return get_arch_dma_ops(NULL);
}

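/*
 * Xen-specific implementations used when a page is foreign (grant-mapped
 * into dom0) rather than local; implemented outside this header, in the
 * arch Xen support code.
 */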
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

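/*
 * Coherent allocations need no local/foreign distinction: these thin
 * wrappers simply forward to the selected dma_map_ops.
 */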
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

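/*
 * Map a page for DMA: use the native dma_ops for local pages and the
 * Xen-specific path for foreign (grant-mapped) pages.
 */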
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. A Linux page can span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages.
	 * So if the first xen_pfn == mfn the page is local, otherwise
	 * it is a foreign page grant-mapped in dom0. If the page is
	 * local we can safely call the native dma_ops function,
	 * otherwise we call the xen specific function.
	 */
	if (local)
		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

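/*
 * Unmap a DMA mapping: pfn_valid() is true only for local (1:1 mapped)
 * memory, so it selects between the native dma_ops and the Xen path.
 */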
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1. A Linux page can span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages.
	 * Since dom0 is mapped 1:1, calling pfn_valid on a foreign mfn
	 * will always return false. If the page is local we can safely
	 * call the native dma_ops function, otherwise we call the xen
	 * specific function.
	 */
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->unmap_page)
			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

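/*
 * The two sync helpers below follow the same local/foreign split as
 * xen_dma_unmap_page() above.
 */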
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
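
/*
 * Minimal usage sketch (illustrative only, not actual caller code):
 * a caller such as the Xen swiotlb layer maps a page, syncs it for the
 * CPU, then unmaps it. The bus-address computation below assumes a
 * local, 1:1 mapped dom0 page.
 *
 *	dma_addr_t dev_addr = page_to_phys(page) + offset;
 *
 *	xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 *	xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
 *	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 */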

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */