#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>

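/*
 * Pick the dma_map_ops the device would use natively: the ops saved in
 * archdata when present (the device's original ops, stashed there by the
 * arch/Xen setup code before Xen installed its own), otherwise the
 * architecture default ops.
 */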
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dev_dma_ops)
		return dev->archdata.dev_dma_ops;
	return get_arch_dma_ops(NULL);
}

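/*
 * Out-of-line helpers (implemented in arch/arm/xen/mm.c) that handle
 * foreign pages, i.e. pages grant-mapped from another domain, which the
 * native dma_map_ops cannot safely operate on.
 */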
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

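/*
 * Coherent allocations are simply forwarded to the native dma_map_ops
 * ->alloc/->free hooks; no local/foreign distinction is needed here
 * because the memory is freshly allocated in this domain, so it is
 * always local.
 */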
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

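/*
 * Map a page for DMA, dispatching on whether the page is local to this
 * domain or a foreign grant mapping: local pages go through the native
 * dma_map_ops, foreign ones through the out-of-line Xen helpers.
 */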
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * So if the first xen_pfn == mfn, the page is local; otherwise
	 * it is a foreign page grant-mapped in dom0. (With 4 KiB kernel
	 * pages XEN_PFN_PER_PAGE == 1, so for a non-compound page the
	 * check reduces to page_pfn == dev_pfn.) If the page is local
	 * we can safely call the native dma_ops function, otherwise we
	 * call the xen specific function.
	 */
	if (local)
		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

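/*
 * The unmap and sync paths only see the dma handle, not the page, so
 * the local/foreign decision is made with pfn_valid() instead. The op
 * is NULL-checked because dma_map_ops hooks are optional and some
 * implementations do not provide unmap_page.
 */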
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1, so calling pfn_valid() on a foreign mfn
	 * always returns false. While a Linux page can span multiple
	 * Xen pages, it cannot contain a mix of local and foreign Xen
	 * pages. If the page is local we can safely call the native
	 * dma_ops function, otherwise we call the xen specific function.
	 */
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->unmap_page)
			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

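/*
 * The two sync helpers below follow the same pfn_valid() dispatch as
 * xen_dma_unmap_page(), applied to the cache-maintenance sync hooks.
 */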
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

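/*
 * Usage sketch (not part of this header): callers such as the Xen
 * swiotlb code are expected to pair these wrappers, e.g.
 *
 *	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
 *	...
 *	xen_dma_unmap_page(dev, dev_addr, size, dir, attrs);
 */
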
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */