#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
	 * or drm_gem_object_put_unlocked() to release a reference to a GEM
	 * buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous CMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @read_domains:
	 *
	 * Read memory domains. These monitor which caches contain read/write
	 * data related to the object. When transitioning from one set of
	 * domains to another, the driver is called to ensure that caches are
	 * suitably flushed and invalidated.
	 */
	uint32_t read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	uint32_t write_domain;

	/**
	 * @pending_read_domains:
	 *
	 * While validating an exec operation, the new read/write domain values
	 * are computed here. They will be transferred to @read_domains and
	 * @write_domain at the point that any cache flushing occurs.
	 */
	uint32_t pending_read_domains;

	/**
	 * @pending_write_domain: Write domain similar to @pending_read_domains.
	 */
	uint32_t pending_write_domain;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_driver.gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more, so drivers where this doesn't make sense (e.g.
	 * virtual devices or a DisplayLink device behind a USB bus) can simply
	 * leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;
};
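
/*
 * Drivers typically embed &struct drm_gem_object into a larger, driver
 * specific buffer object structure and operate on that embedded base object.
 * A minimal sketch of the pattern; "foo_bo" and its members are hypothetical
 * names, not taken from any real driver:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		void *vaddr;
 *	};
 */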

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
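
/*
 * A sketch of how a driver might create a shmem-backed BO using
 * drm_gem_object_init(); drivers without shmem backing would use
 * drm_gem_private_object_init() instead. "foo_bo" and "foo_bo_create" are
 * hypothetical names, size is expected to be page aligned, and error
 * handling is kept minimal:
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, size);
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */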

/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/**
 * __drm_gem_object_put - raw function to release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function is meant to be used by drivers which are not encumbered with
 * &drm_device.struct_mutex legacy locking and which are using the
 * gem_free_object_unlocked callback. It avoids all the locking checks and
 * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
 *
 * Drivers should never call this directly in their code. Instead they should
 * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
 * wrapper function, and use that. Shared code should never call this, to
 * avoid accidentally breaking drivers which still depend upon
 * &drm_device.struct_mutex locking.
 */
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}
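
/*
 * A minimal sketch of the wrapper suggested above, assuming a hypothetical
 * "foo_bo" structure that embeds &struct drm_gem_object as "base" and a
 * driver using the gem_free_object_unlocked callback:
 *
 *	static void foo_bo_put(struct foo_bo *bo)
 *	{
 *		__drm_gem_object_put(&bo->base);
 *	}
 */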

void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);

/**
 * drm_gem_object_reference - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This is a compatibility alias for drm_gem_object_get() and should not be
 * used by new code.
 */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
	drm_gem_object_get(obj);
}

/**
 * __drm_gem_object_unreference - raw function to release a GEM buffer object
 * reference
 * @obj: GEM buffer object
 *
 * This is a compatibility alias for __drm_gem_object_put() and should not be
 * used by new code.
 */
static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
{
	__drm_gem_object_put(obj);
}

/**
 * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This is a compatibility alias for drm_gem_object_put_unlocked() and should
 * not be used by new code.
 */
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	drm_gem_object_put_unlocked(obj);
}

/**
 * drm_gem_object_unreference - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This is a compatibility alias for drm_gem_object_put() and should not be
 * used by new code.
 */
static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
{
	drm_gem_object_put(obj);
}

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
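
/*
 * A sketch of the usual pattern for publishing a freshly created BO to
 * userspace, e.g. from a dumb_create implementation: create the handle and
 * then drop the creation reference, since the handle holds its own reference
 * ("bo" is the hypothetical driver object from the sketches above):
 *
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_put_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */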

void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
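
/*
 * A sketch of how a driver might report the fake mmap offset to userspace
 * (e.g. from a dumb_map_offset implementation), tying together
 * drm_gem_create_mmap_offset() and the @vma_node member documented above;
 * handle lookup and reference handling are omitted:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */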

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);

struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
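
/*
 * drm_gem_object_lookup() returns the GEM object behind a userspace handle
 * and acquires a reference on it, so callers must drop that reference when
 * they are done. A minimal sketch of the usual ioctl-side pattern:
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... use obj ...
 *	drm_gem_object_put_unlocked(obj);
 */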

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

#endif /* __DRM_GEM_H__ */