/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Common functionality of grant device.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _GNTDEV_COMMON_H
#define _GNTDEV_COMMON_H

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>

struct gntdev_dmabuf_priv;

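/* Per-open private state of the gntdev character device. */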
struct gntdev_priv {
        /* Maps with visible offsets in the file descriptor. */
        struct list_head maps;
        /* lock protects maps. */
        struct mutex lock;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        /* Device for which DMA memory is allocated. */
        struct device *dma_dev;
#endif

#ifdef CONFIG_XEN_GNTDEV_DMABUF
        struct gntdev_dmabuf_priv *dmabuf_priv;
#endif
};

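/*
 * Unmap notification requested via IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
 * optionally clear a byte and/or signal an event channel when the
 * mapping is torn down.
 */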
struct gntdev_unmap_notify {
        int flags;
        /* Address relative to the start of the gntdev_grant_map. */
        int addr;
        evtchn_port_t event;
};

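/*
 * State of a single mapping of @count grant references, starting at
 * page offset @index of the device file.
 */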
struct gntdev_grant_map {
        struct mmu_interval_notifier notifier;
        struct list_head next;
        struct vm_area_struct *vma;
        int index;
        int count;
        int flags;
        refcount_t users;
        struct gntdev_unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref *kmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned long pages_vm_start;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        /*
         * If dma_vaddr is not NULL then this mapping is backed by DMA
         * capable memory.
         */

        struct device *dma_dev;
        /* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
        int dma_flags;
        void *dma_vaddr;
        dma_addr_t dma_bus_addr;
        /* Needed to avoid allocation in gnttab_dma_free_pages(). */
        xen_pfn_t *frames;
#endif
};

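/*
 * Allocate a gntdev_grant_map with room for @count grant references.
 * @dma_flags (GNTDEV_DMA_FLAG_*) selects DMA-capable backing when
 * CONFIG_XEN_GRANT_DMA_ALLOC is enabled.  Returns NULL on failure.
 */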
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
                                          int dma_flags);

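/* Insert @add into @priv->maps and assign its index (device offset). */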
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);

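/*
 * Drop a reference to @map; the last reference delivers any pending
 * unmap notification and frees the map.
 */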
void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);

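/* Check that @count is a sane number of pages for a single mapping. */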
bool gntdev_test_page_count(unsigned int count);

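/* Perform the grant-table mapping operations for all pages of @map. */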
int gntdev_map_grant_pages(struct gntdev_grant_map *map);

#endif