/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __DRM_GEM_SHMEM_HELPER_H__
#define __DRM_GEM_SHMEM_HELPER_H__

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

struct dma_buf_attachment;
struct drm_mode_create_dumb;
struct drm_printer;
struct sg_table;

/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages_lock: Protects the page table and use count
	 */
	struct mutex pages_lock;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	unsigned int pages_use_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object is purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects
	 */
	struct list_head madv_list;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	unsigned int pages_mark_dirty_on_put : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	unsigned int pages_mark_accessed_on_put : 1;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vmap_lock: Protects the vmap address and use count
	 */
	struct mutex vmap_lock;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	unsigned int vmap_use_count;

	/**
	 * @map_wc: map object write-combined (instead of using shmem defaults).
	 */
	bool map_wc;
};

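/**
 * to_drm_gem_shmem_obj - Upcast from &struct drm_gem_object
 * @obj: GEM object, which must be embedded in a &struct drm_gem_shmem_object
 *
 * This is a convenience wrapper around container_of(); it is only valid for
 * GEM objects that were allocated through these shmem helpers.
 */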
#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)

struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);

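/*
 * Minimal allocation sketch (illustrative only, not taken from an in-tree
 * driver): drm_gem_shmem_create() returns an ERR_PTR() on failure, so a
 * caller typically does something like:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
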
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_object *obj);
void drm_gem_shmem_unpin(struct drm_gem_object *obj);
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);

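/*
 * Rough vmap/vunmap usage sketch (error handling trimmed, not a verbatim
 * driver excerpt): the mapping is returned through a &struct dma_buf_map
 * and must be passed back unchanged to drm_gem_shmem_vunmap():
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *	// ... access the buffer through map.vaddr ...
 *	drm_gem_shmem_vunmap(obj, &map);
 */
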
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);

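/**
 * drm_gem_shmem_is_purgeable - Check whether a shmem GEM object can be purged
 * @shmem: shmem GEM object
 *
 * Returns true if userspace has marked the object purgeable via madvise
 * (@madv > 0), no vmap is outstanding, the pages are mapped into an sg table,
 * and the buffer is neither exported nor imported as a dma-buf.
 */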
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!shmem->vmap_use_count && shmem->sgt &&
		!shmem->base.dma_buf && !shmem->base.import_attach;
}

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
bool drm_gem_shmem_purge(struct drm_gem_object *obj);

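/*
 * Purge flow sketch (hypothetical driver code, modelled loosely on how
 * madvise-style ioctls and shrinkers tend to use these helpers): userspace
 * first marks a buffer purgeable, and a later memory-pressure callback
 * reclaims it:
 *
 *	// in the driver's madvise ioctl
 *	drm_gem_shmem_madvise(obj, 1);
 *
 *	// under memory pressure, with the relevant locks held
 *	if (drm_gem_shmem_is_purgeable(to_drm_gem_shmem_obj(obj)))
 *		drm_gem_shmem_purge(obj);
 */
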
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle);

int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj);

struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);

struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations in
 * the &drm_driver structure.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		= drm_gem_prime_mmap, \
	.dumb_create		= drm_gem_shmem_dumb_create
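
/*
 * Illustrative use only (driver name and feature flags are made up): a driver
 * would typically expand DRM_GEM_SHMEM_DRIVER_OPS inside its
 * &struct drm_driver initializer:
 *
 *	static struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name = "my-driver",
 *	};
 */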

#endif /* __DRM_GEM_SHMEM_HELPER_H__ */