David Howells718dced2012-10-04 18:21:50 +01001/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _UAPI_I915_DRM_H_
28#define _UAPI_I915_DRM_H_
29
Gabriel Laskar10491022015-11-30 15:10:47 +010030#include "drm.h"
David Howells718dced2012-10-04 18:21:50 +010031
Emil Velikovb1c1f5c2016-04-07 19:00:35 +010032#if defined(__cplusplus)
33extern "C" {
34#endif
35
David Howells718dced2012-10-04 18:21:50 +010036/* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
38 */
39
Ben Widawskycce723e2013-07-19 09:16:42 -070040/**
 41 * DOC: uevents generated by i915 on its device node
42 *
 43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 44 * event from the GPU L3 cache. Additional information supplied is ROW,
Ben Widawsky35a85ac2013-09-19 11:13:41 -070045 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 46 * track of these events, and if a specific cache line seems to have a
 47 * persistent error, remap it with the l3 remapping tool supplied in
 48 * intel-gpu-tools. The value supplied with the event is always 1.
Ben Widawskycce723e2013-07-19 09:16:42 -070049 *
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 * hangcheck. The error detection event is a good indicator of when things
52 * began to go badly. The value supplied with the event is a 1 upon error
 53 * detection, and a 0 upon reset completion, signifying that no error
 54 * remains. NOTE: Disabling hangcheck or reset via module parameter will
55 * cause the related events to not be seen.
56 *
 57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset
 58 * the GPU. The value supplied with the event is always 1. NOTE: Disabling
 59 * reset via module parameter will cause this event to not be seen.
60 */
61#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
62#define I915_ERROR_UEVENT "ERROR"
63#define I915_RESET_UEVENT "RESET"
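/*
 * Illustrative sketch (not part of the UAPI): one way for userspace to listen
 * for the uevents above is a libudev netlink monitor on the "drm" subsystem.
 * The property names checked below reuse the strings defined above; the
 * subsystem filter and the exact environment layout of each event are
 * assumptions for this example and should be verified against the running
 * kernel. Link with -ludev.
 *
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <libudev.h>
 *	#include <drm/i915_drm.h>
 *
 *	int main(void)
 *	{
 *		struct udev *udev = udev_new();
 *		struct udev_monitor *mon =
 *			udev_monitor_new_from_netlink(udev, "kernel");
 *		struct pollfd pfd;
 *
 *		udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *		udev_monitor_enable_receiving(mon);
 *		pfd.fd = udev_monitor_get_fd(mon);
 *		pfd.events = POLLIN;
 *
 *		for (;;) {
 *			struct udev_device *dev;
 *
 *			poll(&pfd, 1, -1);
 *			dev = udev_monitor_receive_device(mon);
 *			if (!dev)
 *				continue;
 *			if (udev_device_get_property_value(dev, I915_ERROR_UEVENT))
 *				printf("i915: error detected\n");
 *			if (udev_device_get_property_value(dev, I915_RESET_UEVENT))
 *				printf("i915: GPU reset imminent\n");
 *			udev_device_unref(dev);
 *		}
 *	}
 */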
David Howells718dced2012-10-04 18:21:50 +010064
Imre Deak3373ce22016-07-01 17:32:08 +030065/*
66 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 67 * surface data and the coherency of this data with respect to CPU and GPU accesses.
68 */
69enum i915_mocs_table_index {
70 /*
71 * Not cached anywhere, coherency between CPU and GPU accesses is
72 * guaranteed.
73 */
74 I915_MOCS_UNCACHED,
75 /*
76 * Cacheability and coherency controlled by the kernel automatically
77 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
78 * usage of the surface (used for display scanout or not).
79 */
80 I915_MOCS_PTE,
81 /*
82 * Cached in all GPU caches available on the platform.
83 * Coherency between CPU and GPU accesses to the surface is not
84 * guaranteed without extra synchronization.
85 */
86 I915_MOCS_CACHED,
87};
88
Tvrtko Ursulin1803fcbc2017-11-10 14:26:27 +000089/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +0000105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
Tvrtko Ursulinb552ae42017-11-23 10:07:01 +0000113 I915_SAMPLE_SEMA = 2
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +0000114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
Tvrtko Ursulin0cd46842017-11-21 18:18:50 +0000140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
Tvrtko Ursulin6060b6a2017-11-21 18:18:52 +0000141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
Tvrtko Ursulin6060b6a2017-11-21 18:18:52 +0000142
Tvrtko Ursulin3452fa32017-11-24 17:13:31 +0000143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
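/*
 * Illustrative sketch (not part of the UAPI): the config values built with the
 * macros above are consumed through the perf_event_open(2) syscall. The i915
 * PMU is an uncore PMU, so the event is opened system-wide (pid == -1) on a
 * single CPU. The sysfs path used here to discover the dynamic PMU type id is
 * the conventional event_source location and is an assumption of this example.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int open_render_busy(void)
 *	{
 *		struct perf_event_attr attr = { 0 };
 *		FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *		int type;
 *
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%d", &type) != 1) {
 *			fclose(f);
 *			return -1;
 *		}
 *		fclose(f);
 *
 *		attr.type = type;
 *		attr.size = sizeof(attr);
 *		attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
 *
 *		// uncore event: pid = -1, cpu = 0, no group leader, no flags
 *		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	}
 *
 *	// A subsequent read(fd, &val, sizeof(__u64)) returns the accumulated
 *	// busy time for the render engine.
 */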
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +0000144
David Howells718dced2012-10-04 18:21:50 +0100145/* Each region is a minimum of 16k, and there are at most 255 of them.
146 */
147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
148 * of chars for next/prev indices */
149#define I915_LOG_MIN_TEX_REGION_SIZE 14
150
151typedef struct _drm_i915_init {
152 enum {
153 I915_INIT_DMA = 0x01,
154 I915_CLEANUP_DMA = 0x02,
155 I915_RESUME_DMA = 0x03
156 } func;
157 unsigned int mmio_offset;
158 int sarea_priv_offset;
159 unsigned int ring_start;
160 unsigned int ring_end;
161 unsigned int ring_size;
162 unsigned int front_offset;
163 unsigned int back_offset;
164 unsigned int depth_offset;
165 unsigned int w;
166 unsigned int h;
167 unsigned int pitch;
168 unsigned int pitch_bits;
169 unsigned int back_pitch;
170 unsigned int depth_pitch;
171 unsigned int cpp;
172 unsigned int chipset;
173} drm_i915_init_t;
174
175typedef struct _drm_i915_sarea {
176 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
177 int last_upload; /* last time texture was uploaded */
178 int last_enqueue; /* last time a buffer was enqueued */
179 int last_dispatch; /* age of the most recently dispatched buffer */
180 int ctxOwner; /* last context to upload state */
181 int texAge;
182 int pf_enabled; /* is pageflipping allowed? */
183 int pf_active;
184 int pf_current_page; /* which buffer is being displayed? */
185 int perf_boxes; /* performance boxes to be displayed */
186 int width, height; /* screen size in pixels */
187
188 drm_handle_t front_handle;
189 int front_offset;
190 int front_size;
191
192 drm_handle_t back_handle;
193 int back_offset;
194 int back_size;
195
196 drm_handle_t depth_handle;
197 int depth_offset;
198 int depth_size;
199
200 drm_handle_t tex_handle;
201 int tex_offset;
202 int tex_size;
203 int log_tex_granularity;
204 int pitch;
205 int rotation; /* 0, 90, 180 or 270 */
206 int rotated_offset;
207 int rotated_size;
208 int rotated_pitch;
209 int virtualX, virtualY;
210
211 unsigned int front_tiled;
212 unsigned int back_tiled;
213 unsigned int depth_tiled;
214 unsigned int rotated_tiled;
215 unsigned int rotated2_tiled;
216
217 int pipeA_x;
218 int pipeA_y;
219 int pipeA_w;
220 int pipeA_h;
221 int pipeB_x;
222 int pipeB_y;
223 int pipeB_w;
224 int pipeB_h;
225
226 /* fill out some space for old userspace triple buffer */
227 drm_handle_t unused_handle;
228 __u32 unused1, unused2, unused3;
229
230 /* buffer object handles for static buffers. May change
231 * over the lifetime of the client.
232 */
233 __u32 front_bo_handle;
234 __u32 back_bo_handle;
235 __u32 unused_bo_handle;
236 __u32 depth_bo_handle;
237
238} drm_i915_sarea_t;
239
240/* due to userspace building against these headers we need some compat here */
241#define planeA_x pipeA_x
242#define planeA_y pipeA_y
243#define planeA_w pipeA_w
244#define planeA_h pipeA_h
245#define planeB_x pipeB_x
246#define planeB_y pipeB_y
247#define planeB_w pipeB_w
248#define planeB_h pipeB_h
249
250/* Flags for perf_boxes
251 */
252#define I915_BOX_RING_EMPTY 0x1
253#define I915_BOX_FLIP 0x2
254#define I915_BOX_WAIT 0x4
255#define I915_BOX_TEXTURE_LOAD 0x8
256#define I915_BOX_LOST_CONTEXT 0x10
257
Damien Lespiau21631f12015-05-26 14:57:19 +0100258/*
259 * i915 specific ioctls.
260 *
 261 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 262 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 263 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
David Howells718dced2012-10-04 18:21:50 +0100264 */
265#define DRM_I915_INIT 0x00
266#define DRM_I915_FLUSH 0x01
267#define DRM_I915_FLIP 0x02
268#define DRM_I915_BATCHBUFFER 0x03
269#define DRM_I915_IRQ_EMIT 0x04
270#define DRM_I915_IRQ_WAIT 0x05
271#define DRM_I915_GETPARAM 0x06
272#define DRM_I915_SETPARAM 0x07
273#define DRM_I915_ALLOC 0x08
274#define DRM_I915_FREE 0x09
275#define DRM_I915_INIT_HEAP 0x0a
276#define DRM_I915_CMDBUFFER 0x0b
277#define DRM_I915_DESTROY_HEAP 0x0c
278#define DRM_I915_SET_VBLANK_PIPE 0x0d
279#define DRM_I915_GET_VBLANK_PIPE 0x0e
280#define DRM_I915_VBLANK_SWAP 0x0f
281#define DRM_I915_HWS_ADDR 0x11
282#define DRM_I915_GEM_INIT 0x13
283#define DRM_I915_GEM_EXECBUFFER 0x14
284#define DRM_I915_GEM_PIN 0x15
285#define DRM_I915_GEM_UNPIN 0x16
286#define DRM_I915_GEM_BUSY 0x17
287#define DRM_I915_GEM_THROTTLE 0x18
288#define DRM_I915_GEM_ENTERVT 0x19
289#define DRM_I915_GEM_LEAVEVT 0x1a
290#define DRM_I915_GEM_CREATE 0x1b
291#define DRM_I915_GEM_PREAD 0x1c
292#define DRM_I915_GEM_PWRITE 0x1d
293#define DRM_I915_GEM_MMAP 0x1e
294#define DRM_I915_GEM_SET_DOMAIN 0x1f
295#define DRM_I915_GEM_SW_FINISH 0x20
296#define DRM_I915_GEM_SET_TILING 0x21
297#define DRM_I915_GEM_GET_TILING 0x22
298#define DRM_I915_GEM_GET_APERTURE 0x23
299#define DRM_I915_GEM_MMAP_GTT 0x24
300#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
301#define DRM_I915_GEM_MADVISE 0x26
302#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
303#define DRM_I915_OVERLAY_ATTRS 0x28
304#define DRM_I915_GEM_EXECBUFFER2 0x29
Chris Wilsonfec04452017-01-27 09:40:08 +0000305#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
David Howells718dced2012-10-04 18:21:50 +0100306#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
307#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
308#define DRM_I915_GEM_WAIT 0x2c
309#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
310#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
311#define DRM_I915_GEM_SET_CACHING 0x2f
312#define DRM_I915_GEM_GET_CACHING 0x30
313#define DRM_I915_REG_READ 0x31
Mika Kuoppalab6359912013-10-30 15:44:16 +0200314#define DRM_I915_GET_RESET_STATS 0x32
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100315#define DRM_I915_GEM_USERPTR 0x33
Chris Wilsonc9dc0f32014-12-24 08:13:40 -0800316#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
317#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
Robert Braggeec688e2016-11-07 19:49:47 +0000318#define DRM_I915_PERF_OPEN 0x36
Lionel Landwerlinf89823c2017-08-03 18:05:50 +0100319#define DRM_I915_PERF_ADD_CONFIG 0x37
320#define DRM_I915_PERF_REMOVE_CONFIG 0x38
Lionel Landwerlina446ae22018-03-06 12:28:56 +0000321#define DRM_I915_QUERY 0x39
David Howells718dced2012-10-04 18:21:50 +0100322
323#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
324#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
325#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
326#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
327#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
328#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
329#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
330#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
331#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
332#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
333#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
334#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
335#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
336#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
337#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
338#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
339#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
340#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
341#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
342#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
Chris Wilsonfec04452017-01-27 09:40:08 +0000343#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
David Howells718dced2012-10-04 18:21:50 +0100344#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
345#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
346#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
347#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
348#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
349#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
350#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
351#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
352#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
353#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
354#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
355#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
356#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
357#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
358#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
359#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
360#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
361#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
362#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
363#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
364#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
365#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
366#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
Tommi Rantala2c60fae2015-03-26 21:47:16 +0200367#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
David Howells718dced2012-10-04 18:21:50 +0100368#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
369#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
370#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
371#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
Mika Kuoppalab6359912013-10-30 15:44:16 +0200372#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
Chris Wilson5cc9ed42014-05-16 14:22:37 +0100373#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
Chris Wilsonc9dc0f32014-12-24 08:13:40 -0800374#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
375#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
Robert Braggeec688e2016-11-07 19:49:47 +0000376#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
Lionel Landwerlinf89823c2017-08-03 18:05:50 +0100377#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
378#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
Lionel Landwerlina446ae22018-03-06 12:28:56 +0000379#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
David Howells718dced2012-10-04 18:21:50 +0100380
381/* Allow drivers to submit batchbuffers directly to hardware, relying
382 * on the security mechanisms provided by hardware.
383 */
384typedef struct drm_i915_batchbuffer {
385 int start; /* agp offset */
386 int used; /* nr bytes in use */
387 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
388 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
 389 int num_cliprects; /* multipass with multiple cliprects? */
390 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
391} drm_i915_batchbuffer_t;
392
 393/* As above, but pass a pointer to a userspace buffer which can be
394 * validated by the kernel prior to sending to hardware.
395 */
396typedef struct _drm_i915_cmdbuffer {
397 char __user *buf; /* pointer to userspace command buffer */
398 int sz; /* nr bytes in buf */
399 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
400 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
 401 int num_cliprects; /* multipass with multiple cliprects? */
402 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
403} drm_i915_cmdbuffer_t;
404
405/* Userspace can request & wait on irq's:
406 */
407typedef struct drm_i915_irq_emit {
408 int __user *irq_seq;
409} drm_i915_irq_emit_t;
410
411typedef struct drm_i915_irq_wait {
412 int irq_seq;
413} drm_i915_irq_wait_t;
414
415/* Ioctl to query kernel params:
416 */
417#define I915_PARAM_IRQ_ACTIVE 1
418#define I915_PARAM_ALLOW_BATCHBUFFER 2
419#define I915_PARAM_LAST_DISPATCH 3
420#define I915_PARAM_CHIPSET_ID 4
421#define I915_PARAM_HAS_GEM 5
422#define I915_PARAM_NUM_FENCES_AVAIL 6
423#define I915_PARAM_HAS_OVERLAY 7
424#define I915_PARAM_HAS_PAGEFLIPPING 8
425#define I915_PARAM_HAS_EXECBUF2 9
426#define I915_PARAM_HAS_BSD 10
427#define I915_PARAM_HAS_BLT 11
428#define I915_PARAM_HAS_RELAXED_FENCING 12
429#define I915_PARAM_HAS_COHERENT_RINGS 13
430#define I915_PARAM_HAS_EXEC_CONSTANTS 14
431#define I915_PARAM_HAS_RELAXED_DELTA 15
432#define I915_PARAM_HAS_GEN7_SOL_RESET 16
433#define I915_PARAM_HAS_LLC 17
434#define I915_PARAM_HAS_ALIASING_PPGTT 18
435#define I915_PARAM_HAS_WAIT_TIMEOUT 19
436#define I915_PARAM_HAS_SEMAPHORES 20
437#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
Xiang, Haihaoa1f2cc72013-05-28 19:22:34 -0700438#define I915_PARAM_HAS_VEBOX 22
Daniel Vetterc2fb7912012-10-22 14:34:51 +0200439#define I915_PARAM_HAS_SECURE_BATCHES 23
Daniel Vetterb45305f2012-12-17 16:21:27 +0100440#define I915_PARAM_HAS_PINNED_BATCHES 24
Daniel Vettered5982e2013-01-17 22:23:36 +0100441#define I915_PARAM_HAS_EXEC_NO_RELOC 25
Chris Wilsoneef90cc2013-01-08 10:53:17 +0000442#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
Chris Wilson651d7942013-08-08 14:41:10 +0100443#define I915_PARAM_HAS_WT 27
Brad Volkind728c8e2014-02-18 10:15:56 -0800444#define I915_PARAM_CMD_PARSER_VERSION 28
Chris Wilson6a2c4232014-11-04 04:51:40 -0800445#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
Akash Goel1816f922015-01-02 16:29:30 +0530446#define I915_PARAM_MMAP_VERSION 30
Zhipeng Gong08e16dc2015-01-13 08:48:25 +0800447#define I915_PARAM_HAS_BSD2 31
Neil Roberts27cd4462015-03-04 14:41:16 +0000448#define I915_PARAM_REVISION 32
Jeff McGeea1559ff2015-03-09 16:06:54 -0700449#define I915_PARAM_SUBSLICE_TOTAL 33
450#define I915_PARAM_EU_TOTAL 34
Chris Wilson49e4d8422015-06-15 12:23:48 +0100451#define I915_PARAM_HAS_GPU_RESET 35
Abdiel Janulguea9ed33c2015-07-01 10:12:23 +0300452#define I915_PARAM_HAS_RESOURCE_STREAMER 36
Chris Wilson506a8e82015-12-08 11:55:07 +0000453#define I915_PARAM_HAS_EXEC_SOFTPIN 37
arun.siluvery@linux.intel.com37f501a2016-07-01 11:43:02 +0100454#define I915_PARAM_HAS_POOLED_EU 38
455#define I915_PARAM_MIN_EU_IN_POOL 39
Chris Wilson4cc69072016-08-25 19:05:19 +0100456#define I915_PARAM_MMAP_GTT_VERSION 40
David Howells718dced2012-10-04 18:21:50 +0100457
Chris Wilsonbf64e0b2017-10-03 21:34:51 +0100458/*
 459 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
Chris Wilson0de91362016-11-14 20:41:01 +0000460 * priorities and the driver will attempt to execute batches in priority order.
Chris Wilsonbf64e0b2017-10-03 21:34:51 +0100461 * The param returns a capability bitmask; a nonzero value implies that the scheduler
462 * is enabled, with different features present according to the mask.
Chris Wilsonac14fbd2017-10-03 21:34:53 +0100463 *
464 * The initial priority for each batch is supplied by the context and is
465 * controlled via I915_CONTEXT_PARAM_PRIORITY.
Chris Wilson0de91362016-11-14 20:41:01 +0000466 */
467#define I915_PARAM_HAS_SCHEDULER 41
Chris Wilsonbf64e0b2017-10-03 21:34:51 +0100468#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
469#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
470#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
471
Anusha Srivatsa5464cd62017-01-18 08:05:58 -0800472#define I915_PARAM_HUC_STATUS 42
Chris Wilson0de91362016-11-14 20:41:01 +0000473
Chris Wilson77ae9952017-01-27 09:40:07 +0000474/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
475 * synchronisation with implicit fencing on individual objects.
476 * See EXEC_OBJECT_ASYNC.
477 */
478#define I915_PARAM_HAS_EXEC_ASYNC 43
479
Chris Wilsonfec04452017-01-27 09:40:08 +0000480/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fencing -
481 * both being able to pass in a sync_file fd to wait upon before executing,
482 * and being able to return a new sync_file fd that is signaled when the
483 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
484 */
485#define I915_PARAM_HAS_EXEC_FENCE 44
486
Chris Wilsonb0fd47a2017-04-15 10:39:02 +0100487/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 488 * user-specified buffers for post-mortem debugging of GPU hangs. See
489 * EXEC_OBJECT_CAPTURE.
490 */
491#define I915_PARAM_HAS_EXEC_CAPTURE 45
492
Robert Bragg7fed5552017-06-13 12:22:59 +0100493#define I915_PARAM_SLICE_MASK 46
494
Robert Braggf5320232017-06-13 12:23:00 +0100495/* Assuming it's uniform for each slice, this queries the mask of subslices
496 * per-slice for this system.
497 */
498#define I915_PARAM_SUBSLICE_MASK 47
499
Chris Wilson1a71cf22017-06-16 15:05:23 +0100500/*
501 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
502 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
503 */
504#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
505
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +0100506/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
507 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
508 */
509#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
510
Chris Wilsond2b4b972017-11-10 14:26:33 +0000511/*
512 * Query whether every context (both per-file default and user created) is
513 * isolated (insofar as HW supports). If this parameter is not true, then
514 * freshly created contexts may inherit values from an existing context,
515 * rather than default HW values. If true, it also ensures (insofar as HW
516 * supports) that all state set by this context will not leak to any other
517 * context.
518 *
 519 * As not every engine across every gen supports contexts, the returned
520 * value reports the support of context isolation for individual engines by
521 * returning a bitmask of each engine class set to true if that class supports
522 * isolation.
523 */
524#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
525
Lionel Landwerlindab91782017-11-10 19:08:44 +0000526/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 527 * registers. This used to be fixed per platform but from CNL onwards, it
 528 * might vary depending on the part.
529 */
530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
531
Chris Wilson900ccf32018-07-20 11:19:10 +0100532/*
533 * Once upon a time we supposed that writes through the GGTT would be
534 * immediately in physical memory (once flushed out of the CPU path). However,
535 * on a few different processors and chipsets, this is not necessarily the case
536 * as the writes appear to be buffered internally. Thus a read of the backing
537 * storage (physical memory) via a different path (with different physical tags
538 * to the indirect write via the GGTT) will see stale values from before
539 * the GGTT write. Inside the kernel, we can for the most part keep track of
540 * the different read/write domains in use (e.g. set-domain), but the assumption
541 * of coherency is baked into the ABI, hence reporting its true state in this
542 * parameter.
543 *
544 * Reports true when writes via mmap_gtt are immediately visible following an
545 * lfence to flush the WCB.
546 *
 547 * Reports false when writes via mmap_gtt are indeterminately delayed in an
548 * internal buffer and are _not_ immediately visible to third parties accessing
549 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 550 * communications channel when reporting false is strongly discouraged.
551 */
552#define I915_PARAM_MMAP_GTT_COHERENT 52
553
David Howells718dced2012-10-04 18:21:50 +0100554typedef struct drm_i915_getparam {
Artem Savkov16f72492015-09-02 13:41:18 +0200555 __s32 param;
Daniel Vetter346add72015-07-14 18:07:30 +0200556 /*
557 * WARNING: Using pointers instead of fixed-size u64 means we need to write
558 * compat32 code. Don't repeat this mistake.
559 */
David Howells718dced2012-10-04 18:21:50 +0100560 int __user *value;
561} drm_i915_getparam_t;
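/*
 * Illustrative sketch (not part of the UAPI): querying one of the parameters
 * listed above. Assumes a DRM file descriptor already opened from e.g.
 * /dev/dri/card0 or a render node; the parameter chosen here is just an
 * example and the helper name is made up.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int has_exec_fence(int drm_fd)
 *	{
 *		int value = 0;
 *		drm_i915_getparam_t gp = {
 *			.param = I915_PARAM_HAS_EXEC_FENCE,
 *			.value = &value,
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return 0;
 *		return value;
 *	}
 */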
562
563/* Ioctl to set kernel params:
564 */
565#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
566#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
567#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
568#define I915_SETPARAM_NUM_USED_FENCES 4
569
570typedef struct drm_i915_setparam {
571 int param;
572 int value;
573} drm_i915_setparam_t;
574
575/* A memory manager for regions of shared memory:
576 */
577#define I915_MEM_REGION_AGP 1
578
579typedef struct drm_i915_mem_alloc {
580 int region;
581 int alignment;
582 int size;
583 int __user *region_offset; /* offset from start of fb or agp */
584} drm_i915_mem_alloc_t;
585
586typedef struct drm_i915_mem_free {
587 int region;
588 int region_offset;
589} drm_i915_mem_free_t;
590
591typedef struct drm_i915_mem_init_heap {
592 int region;
593 int size;
594 int start;
595} drm_i915_mem_init_heap_t;
596
597/* Allow memory manager to be torn down and re-initialized (eg on
598 * rotate):
599 */
600typedef struct drm_i915_mem_destroy_heap {
601 int region;
602} drm_i915_mem_destroy_heap_t;
603
604/* Allow X server to configure which pipes to monitor for vblank signals
605 */
606#define DRM_I915_VBLANK_PIPE_A 1
607#define DRM_I915_VBLANK_PIPE_B 2
608
609typedef struct drm_i915_vblank_pipe {
610 int pipe;
611} drm_i915_vblank_pipe_t;
612
613/* Schedule buffer swap at given vertical blank:
614 */
615typedef struct drm_i915_vblank_swap {
616 drm_drawable_t drawable;
617 enum drm_vblank_seq_type seqtype;
618 unsigned int sequence;
619} drm_i915_vblank_swap_t;
620
621typedef struct drm_i915_hws_addr {
622 __u64 addr;
623} drm_i915_hws_addr_t;
624
625struct drm_i915_gem_init {
626 /**
627 * Beginning offset in the GTT to be managed by the DRM memory
628 * manager.
629 */
630 __u64 gtt_start;
631 /**
632 * Ending offset in the GTT to be managed by the DRM memory
633 * manager.
634 */
635 __u64 gtt_end;
636};
637
638struct drm_i915_gem_create {
639 /**
640 * Requested size for the object.
641 *
642 * The (page-aligned) allocated size for the object will be returned.
643 */
644 __u64 size;
645 /**
646 * Returned handle for the object.
647 *
648 * Object handles are nonzero.
649 */
650 __u32 handle;
651 __u32 pad;
652};
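/*
 * Illustrative sketch (not part of the UAPI): allocating a buffer object of
 * the requested size. Assumes an open DRM fd; the returned handle is what the
 * other GEM ioctls below operate on, and the helper name is made up.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static __u32 bo_create(int drm_fd, __u64 size)
 *	{
 *		struct drm_i915_gem_create create = { .size = size };
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *			return 0;	// handles are nonzero, so 0 signals failure here
 *		return create.handle;
 *	}
 */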
653
654struct drm_i915_gem_pread {
655 /** Handle for the object being read. */
656 __u32 handle;
657 __u32 pad;
658 /** Offset into the object to read from */
659 __u64 offset;
660 /** Length of data to read */
661 __u64 size;
662 /**
663 * Pointer to write the data into.
664 *
665 * This is a fixed-size type for 32/64 compatibility.
666 */
667 __u64 data_ptr;
668};
669
670struct drm_i915_gem_pwrite {
671 /** Handle for the object being written to. */
672 __u32 handle;
673 __u32 pad;
674 /** Offset into the object to write to */
675 __u64 offset;
676 /** Length of data to write */
677 __u64 size;
678 /**
679 * Pointer to read the data from.
680 *
681 * This is a fixed-size type for 32/64 compatibility.
682 */
683 __u64 data_ptr;
684};
685
686struct drm_i915_gem_mmap {
687 /** Handle for the object being mapped. */
688 __u32 handle;
689 __u32 pad;
690 /** Offset in the object to map. */
691 __u64 offset;
692 /**
693 * Length of data to map.
694 *
695 * The value will be page-aligned.
696 */
697 __u64 size;
698 /**
699 * Returned pointer the data was mapped at.
700 *
701 * This is a fixed-size type for 32/64 compatibility.
702 */
703 __u64 addr_ptr;
Akash Goel1816f922015-01-02 16:29:30 +0530704
705 /**
706 * Flags for extended behaviour.
707 *
708 * Added in version 2.
709 */
710 __u64 flags;
711#define I915_MMAP_WC 0x1
David Howells718dced2012-10-04 18:21:50 +0100712};
713
714struct drm_i915_gem_mmap_gtt {
715 /** Handle for the object being mapped. */
716 __u32 handle;
717 __u32 pad;
718 /**
719 * Fake offset to use for subsequent mmap call
720 *
721 * This is a fixed-size type for 32/64 compatibility.
722 */
723 __u64 offset;
724};
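/*
 * Illustrative sketch (not part of the UAPI): the returned offset is a fake
 * offset into the DRM fd's mmap space, not a pointer. Assumes an open DRM fd
 * and an existing object handle of the given size; the helper name is made up.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void *bo_map_gtt(int drm_fd, __u32 handle, __u64 size)
 *	{
 *		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *			return MAP_FAILED;
 *		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			    drm_fd, arg.offset);
 *	}
 */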
725
726struct drm_i915_gem_set_domain {
727 /** Handle for the object */
728 __u32 handle;
729
730 /** New read domains */
731 __u32 read_domains;
732
733 /** New write domain */
734 __u32 write_domain;
735};
736
737struct drm_i915_gem_sw_finish {
738 /** Handle for the object */
739 __u32 handle;
740};
741
742struct drm_i915_gem_relocation_entry {
743 /**
744 * Handle of the buffer being pointed to by this relocation entry.
745 *
746 * It's appealing to make this be an index into the mm_validate_entry
747 * list to refer to the buffer, but this allows the driver to create
748 * a relocation list for state buffers and not re-write it per
749 * exec using the buffer.
750 */
751 __u32 target_handle;
752
753 /**
754 * Value to be added to the offset of the target buffer to make up
755 * the relocation entry.
756 */
757 __u32 delta;
758
759 /** Offset in the buffer the relocation entry will be written into */
760 __u64 offset;
761
762 /**
763 * Offset value of the target buffer that the relocation entry was last
764 * written as.
765 *
766 * If the buffer has the same offset as last time, we can skip syncing
767 * and writing the relocation. This value is written back out by
768 * the execbuffer ioctl when the relocation is written.
769 */
770 __u64 presumed_offset;
771
772 /**
773 * Target memory domains read by this operation.
774 */
775 __u32 read_domains;
776
777 /**
778 * Target memory domains written by this operation.
779 *
780 * Note that only one domain may be written by the whole
781 * execbuffer operation, so that where there are conflicts,
782 * the application will get -EINVAL back.
783 */
784 __u32 write_domain;
785};
786
787/** @{
788 * Intel memory domains
789 *
790 * Most of these just align with the various caches in
791 * the system and are used to flush and invalidate as
792 * objects end up cached in different domains.
793 */
794/** CPU cache */
795#define I915_GEM_DOMAIN_CPU 0x00000001
796/** Render cache, used by 2D and 3D drawing */
797#define I915_GEM_DOMAIN_RENDER 0x00000002
798/** Sampler cache, used by texture engine */
799#define I915_GEM_DOMAIN_SAMPLER 0x00000004
800/** Command queue, used to load batch buffers */
801#define I915_GEM_DOMAIN_COMMAND 0x00000008
802/** Instruction cache, used by shader programs */
803#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
804/** Vertex address cache */
805#define I915_GEM_DOMAIN_VERTEX 0x00000020
806/** GTT domain - aperture and scanout */
807#define I915_GEM_DOMAIN_GTT 0x00000040
Chris Wilsone22d8e32017-04-12 12:01:11 +0100808/** WC domain - uncached access */
809#define I915_GEM_DOMAIN_WC 0x00000080
David Howells718dced2012-10-04 18:21:50 +0100810/** @} */
811
812struct drm_i915_gem_exec_object {
813 /**
814 * User's handle for a buffer to be bound into the GTT for this
815 * operation.
816 */
817 __u32 handle;
818
819 /** Number of relocations to be performed on this buffer */
820 __u32 relocation_count;
821 /**
822 * Pointer to array of struct drm_i915_gem_relocation_entry containing
823 * the relocations to be performed in this buffer.
824 */
825 __u64 relocs_ptr;
826
827 /** Required alignment in graphics aperture */
828 __u64 alignment;
829
830 /**
831 * Returned value of the updated offset of the object, for future
832 * presumed_offset writes.
833 */
834 __u64 offset;
835};
836
837struct drm_i915_gem_execbuffer {
838 /**
839 * List of buffers to be validated with their relocations to be
 840 * performed on them.
841 *
842 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
843 *
844 * These buffers must be listed in an order such that all relocations
845 * a buffer is performing refer to buffers that have already appeared
846 * in the validate list.
847 */
848 __u64 buffers_ptr;
849 __u32 buffer_count;
850
851 /** Offset in the batchbuffer to start execution from. */
852 __u32 batch_start_offset;
853 /** Bytes used in batchbuffer from batch_start_offset */
854 __u32 batch_len;
855 __u32 DR1;
856 __u32 DR4;
857 __u32 num_cliprects;
858 /** This is a struct drm_clip_rect *cliprects */
859 __u64 cliprects_ptr;
860};
861
862struct drm_i915_gem_exec_object2 {
863 /**
864 * User's handle for a buffer to be bound into the GTT for this
865 * operation.
866 */
867 __u32 handle;
868
869 /** Number of relocations to be performed on this buffer */
870 __u32 relocation_count;
871 /**
872 * Pointer to array of struct drm_i915_gem_relocation_entry containing
873 * the relocations to be performed in this buffer.
874 */
875 __u64 relocs_ptr;
876
877 /** Required alignment in graphics aperture */
878 __u64 alignment;
879
880 /**
Chris Wilson506a8e82015-12-08 11:55:07 +0000881 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
882 * the user with the GTT offset at which this object will be pinned.
883 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
884 * presumed_offset of the object.
885 * During execbuffer2 the kernel populates it with the value of the
886 * current GTT offset of the object, for future presumed_offset writes.
David Howells718dced2012-10-04 18:21:50 +0100887 */
888 __u64 offset;
889
Dave Gordon9e2793f62016-07-14 14:52:03 +0100890#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
891#define EXEC_OBJECT_NEEDS_GTT (1<<1)
892#define EXEC_OBJECT_WRITE (1<<2)
Michel Thierry101b5062015-10-01 13:33:57 +0100893#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
Dave Gordon9e2793f62016-07-14 14:52:03 +0100894#define EXEC_OBJECT_PINNED (1<<4)
Chris Wilson91b2db62016-08-04 16:32:23 +0100895#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
Chris Wilson77ae9952017-01-27 09:40:07 +0000896/* The kernel implicitly tracks GPU activity on all GEM objects, and
897 * synchronises operations with outstanding rendering. This includes
898 * rendering on other devices if exported via dma-buf. However, sometimes
899 * this tracking is too coarse and the user knows better. For example,
900 * if the object is split into non-overlapping ranges shared between different
901 * clients or engines (i.e. suballocating objects), the implicit tracking
 902 * by the kernel assumes that each operation affects the whole object rather
903 * than an individual range, causing needless synchronisation between clients.
904 * The kernel will also forgo any CPU cache flushes prior to rendering from
 905 * the object as the client is expected to also be handling such domain
906 * tracking.
907 *
908 * The kernel maintains the implicit tracking in order to manage resources
909 * used by the GPU - this flag only disables the synchronisation prior to
910 * rendering with this object in this execbuf.
911 *
 912 * Opting out of implicit synchronisation requires the user to do its own
913 * explicit tracking to avoid rendering corruption. See, for example,
914 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
915 */
916#define EXEC_OBJECT_ASYNC (1<<6)
Chris Wilsonb0fd47a2017-04-15 10:39:02 +0100917/* Request that the contents of this execobject be copied into the error
918 * state upon a GPU hang involving this batch for post-mortem debugging.
919 * These buffers are recorded in no particular order as "user" in
920 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
921 * if the kernel supports this flag.
922 */
923#define EXEC_OBJECT_CAPTURE (1<<7)
Dave Gordon9e2793f62016-07-14 14:52:03 +0100924/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
Chris Wilsonb0fd47a2017-04-15 10:39:02 +0100925#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
David Howells718dced2012-10-04 18:21:50 +0100926 __u64 flags;
Daniel Vettered5982e2013-01-17 22:23:36 +0100927
Chris Wilson91b2db62016-08-04 16:32:23 +0100928 union {
929 __u64 rsvd1;
930 __u64 pad_to_size;
931 };
David Howells718dced2012-10-04 18:21:50 +0100932 __u64 rsvd2;
933};
934
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +0100935struct drm_i915_gem_exec_fence {
936 /**
937 * User's handle for a drm_syncobj to wait on or signal.
938 */
939 __u32 handle;
940
941#define I915_EXEC_FENCE_WAIT (1<<0)
942#define I915_EXEC_FENCE_SIGNAL (1<<1)
Tvrtko Ursulinebcaa1f2017-10-31 10:23:25 +0000943#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +0100944 __u32 flags;
945};
946
David Howells718dced2012-10-04 18:21:50 +0100947struct drm_i915_gem_execbuffer2 {
948 /**
949 * List of gem_exec_object2 structs
950 */
951 __u64 buffers_ptr;
952 __u32 buffer_count;
953
954 /** Offset in the batchbuffer to start execution from. */
955 __u32 batch_start_offset;
956 /** Bytes used in batchbuffer from batch_start_offset */
957 __u32 batch_len;
958 __u32 DR1;
959 __u32 DR4;
960 __u32 num_cliprects;
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +0100961 /**
962 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
963 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
964 * struct drm_i915_gem_exec_fence *fences.
965 */
David Howells718dced2012-10-04 18:21:50 +0100966 __u64 cliprects_ptr;
967#define I915_EXEC_RING_MASK (7<<0)
968#define I915_EXEC_DEFAULT (0<<0)
969#define I915_EXEC_RENDER (1<<0)
970#define I915_EXEC_BSD (2<<0)
971#define I915_EXEC_BLT (3<<0)
Xiang, Haihao82f91b62013-05-28 19:22:33 -0700972#define I915_EXEC_VEBOX (4<<0)
David Howells718dced2012-10-04 18:21:50 +0100973
974/* Used for switching the constants addressing mode on gen4+ RENDER ring.
975 * Gen6+ only supports relative addressing to dynamic state (default) and
976 * absolute addressing.
977 *
978 * These flags are ignored for the BSD and BLT rings.
979 */
980#define I915_EXEC_CONSTANTS_MASK (3<<6)
981#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
982#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
983#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
984 __u64 flags;
985 __u64 rsvd1; /* now used for context info */
986 __u64 rsvd2;
987};
988
989/** Resets the SO write offset registers for transform feedback on gen7. */
990#define I915_EXEC_GEN7_SOL_RESET (1<<8)
991
Daniel Vetterc2fb7912012-10-22 14:34:51 +0200992/** Request a privileged ("secure") batch buffer. Note only available for
993 * DRM_ROOT_ONLY | DRM_MASTER processes.
994 */
995#define I915_EXEC_SECURE (1<<9)
996
Daniel Vetterb45305f2012-12-17 16:21:27 +0100997/** Inform the kernel that the batch is and will always be pinned. This
998 * negates the requirement for a workaround to be performed to avoid
999 * an incoherent CS (such as can be found on 830/845). If this flag is
1000 * not passed, the kernel will endeavour to make sure the batch is
1001 * coherent with the CS before execution. If this flag is passed,
1002 * userspace assumes the responsibility for ensuring the same.
1003 */
1004#define I915_EXEC_IS_PINNED (1<<10)
1005
Geert Uytterhoevenc3d19d32014-01-12 14:08:43 +01001006/** Provide a hint to the kernel that the command stream and auxiliary
Daniel Vettered5982e2013-01-17 22:23:36 +01001007 * state buffers already hold the correct presumed addresses and so the
1008 * relocation process may be skipped if no buffers need to be moved in
1009 * preparation for the execbuffer.
1010 */
1011#define I915_EXEC_NO_RELOC (1<<11)
1012
Chris Wilsoneef90cc2013-01-08 10:53:17 +00001013/** Use the reloc.handle as an index into the exec object array rather
1014 * than as the per-file handle.
1015 */
1016#define I915_EXEC_HANDLE_LUT (1<<12)
1017
Zhipeng Gong8d360df2015-01-13 08:48:24 +08001018/** Used for switching BSD rings on the platforms with two BSD rings */
Tvrtko Ursulind9da6aa2016-01-27 13:41:09 +00001019#define I915_EXEC_BSD_SHIFT (13)
1020#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
1021/* default ping-pong mode */
1022#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
1023#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
1024#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
Zhipeng Gong8d360df2015-01-13 08:48:24 +08001025
Abdiel Janulguea9ed33c2015-07-01 10:12:23 +03001026/** Tell the kernel that the batchbuffer is processed by
1027 * the resource streamer.
1028 */
1029#define I915_EXEC_RESOURCE_STREAMER (1<<15)
1030
Chris Wilsonfec04452017-01-27 09:40:08 +00001031/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represents
1032 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1033 * the batch.
1034 *
1035 * Returns -EINVAL if the sync_file fd cannot be found.
1036 */
1037#define I915_EXEC_FENCE_IN (1<<16)
1038
1039/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1040 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 1041 * to the caller, and it should be closed after use. (The fd is a regular
1042 * file descriptor and will be cleaned up on process termination. It holds
1043 * a reference to the request, but nothing else.)
1044 *
 1045 * The sync_file fd can be combined with other sync_file fds and passed either
1046 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1047 * will only occur after this request completes), or to other devices.
1048 *
1049 * Using I915_EXEC_FENCE_OUT requires use of
1050 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1051 * back to userspace. Failure to do so will cause the out-fence to always
1052 * be reported as zero, and the real fence fd to be leaked.
1053 */
1054#define I915_EXEC_FENCE_OUT (1<<17)
1055
Chris Wilson1a71cf22017-06-16 15:05:23 +01001056/*
1057 * Traditionally the execbuf ioctl has only considered the final element in
1058 * the execobject[] to be the executable batch. Often though, the client
 1059 * will know the batch object prior to construction, and being able to place
1060 * it into the execobject[] array first can simplify the relocation tracking.
1061 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 1062 * execobject[] as the batch instead (the default is to use the last
1063 * element).
1064 */
1065#define I915_EXEC_BATCH_FIRST (1<<18)
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +01001066
 1067/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 1068 * define an array of drm_i915_gem_exec_fence structures which specify a set of
1069 * dma fences to wait upon or signal.
1070 */
1071#define I915_EXEC_FENCE_ARRAY (1<<19)
1072
1073#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
Daniel Vettered5982e2013-01-17 22:23:36 +01001074
David Howells718dced2012-10-04 18:21:50 +01001075#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
1076#define i915_execbuffer2_set_context_id(eb2, context) \
1077 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1078#define i915_execbuffer2_get_context_id(eb2) \
1079 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
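/*
 * Illustrative sketch (not part of the UAPI): plumbing in/out fences through
 * execbuf as described above. Assumes "eb" has already been populated with
 * valid exec objects and a batch; only the context id and fence handling are
 * shown, and the helper name is made up.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int exec_with_fences(int drm_fd, struct drm_i915_gem_execbuffer2 *eb,
 *				    __u32 ctx_id, int in_fence, int *out_fence)
 *	{
 *		i915_execbuffer2_set_context_id(*eb, ctx_id);
 *		eb->flags |= I915_EXEC_FENCE_OUT;
 *		if (in_fence >= 0) {
 *			eb->flags |= I915_EXEC_FENCE_IN;
 *			eb->rsvd2 = (__u32)in_fence;
 *		}
 *
 *		// the _WR variant is required so the out-fence is written back
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, eb))
 *			return -1;
 *
 *		*out_fence = eb->rsvd2 >> 32;
 *		return 0;
 *	}
 */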
1080
1081struct drm_i915_gem_pin {
1082 /** Handle of the buffer to be pinned. */
1083 __u32 handle;
1084 __u32 pad;
1085
1086 /** alignment required within the aperture */
1087 __u64 alignment;
1088
1089 /** Returned GTT offset of the buffer. */
1090 __u64 offset;
1091};
1092
1093struct drm_i915_gem_unpin {
1094 /** Handle of the buffer to be unpinned. */
1095 __u32 handle;
1096 __u32 pad;
1097};
1098
1099struct drm_i915_gem_busy {
1100 /** Handle of the buffer to check for busy */
1101 __u32 handle;
1102
Chris Wilson426960b2016-01-15 16:51:46 +00001103 /** Return busy status
1104 *
1105 * A return of 0 implies that the object is idle (after
1106 * having flushed any pending activity), and a non-zero return that
1107 * the object is still in-flight on the GPU. (The GPU has not yet
1108 * signaled completion for all pending requests that reference the
Chris Wilson12555012016-08-16 09:50:40 +01001109 * object.) An object is guaranteed to become idle eventually (so
1110 * long as no new GPU commands are executed upon it). Due to the
1111 * asynchronous nature of the hardware, an object reported
1112 * as busy may become idle before the ioctl is completed.
1113 *
1114 * Furthermore, if the object is busy, which engine is busy is only
1115 * provided as a guide. There are race conditions which prevent the
1116 * report of which engines are busy from being always accurate.
1117 * However, the converse is not true. If the object is idle, the
1118 * result of the ioctl, that all engines are idle, is accurate.
Chris Wilson426960b2016-01-15 16:51:46 +00001119 *
1120 * The returned dword is split into two fields to indicate both
1121 * the engines on which the object is being read, and the
1122 * engine on which it is currently being written (if any).
1123 *
1124 * The low word (bits 0:15) indicate if the object is being written
1125 * to by any engine (there can only be one, as the GEM implicit
1126 * synchronisation rules force writes to be serialised). Only the
1127 * engine for the last write is reported.
1128 *
1129 * The high word (bits 16:31) are a bitmask of which engines are
1130 * currently reading from the object. Multiple engines may be
1131 * reading from the object simultaneously.
1132 *
1133 * The value of each engine is the same as specified in the
1134 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
1135 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
1136 * the I915_EXEC_RENDER engine for execution, and so it is never
1137 * reported as active itself. Some hardware may have parallel
1138 * execution engines, e.g. multiple media engines, which are
1139 * mapped to the same identifier in the EXECBUFFER2 ioctl and
1140 * so are not separately reported for busyness.
Chris Wilson12555012016-08-16 09:50:40 +01001141 *
1142 * Caveat emptor:
1143 * Only the boolean result of this query is reliable; that is whether
1144 * the object is idle or busy. The report of which engines are busy
1145 * should be only used as a heuristic.
David Howells718dced2012-10-04 18:21:50 +01001146 */
1147 __u32 busy;
1148};
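/*
 * Illustrative sketch (not part of the UAPI): decoding the busy field as
 * described above. Assumes an open DRM fd and a valid object handle
 * (bo_handle and drm_fd are placeholders).
 *
 *	struct drm_i915_gem_busy busy = { .handle = bo_handle };
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		__u32 write_engine = busy.busy & 0xffff;	// 0 if not being written
 *		__u32 read_engines = busy.busy >> 16;		// bitmask of readers
 *		int idle = (busy.busy == 0);			// the only reliable result
 *	}
 */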
1149
Daniel Vetter35c7ab42013-08-10 14:51:11 +02001150/**
1151 * I915_CACHING_NONE
1152 *
1153 * GPU access is not coherent with cpu caches. Default for machines without an
1154 * LLC.
1155 */
David Howells718dced2012-10-04 18:21:50 +01001156#define I915_CACHING_NONE 0
Daniel Vetter35c7ab42013-08-10 14:51:11 +02001157/**
1158 * I915_CACHING_CACHED
1159 *
1160 * GPU access is coherent with cpu caches and furthermore the data is cached in
1161 * last-level caches shared between cpu cores and the gpu GT. Default on
1162 * machines with HAS_LLC.
1163 */
David Howells718dced2012-10-04 18:21:50 +01001164#define I915_CACHING_CACHED 1
Daniel Vetter35c7ab42013-08-10 14:51:11 +02001165/**
1166 * I915_CACHING_DISPLAY
1167 *
1168 * Special GPU caching mode which is coherent with the scanout engines.
1169 * Transparently falls back to I915_CACHING_NONE on platforms where no special
1170 * cache mode (like write-through or gfdt flushing) is available. The kernel
1171 * automatically sets this mode when using a buffer as a scanout target.
1172 * Userspace can manually set this mode to avoid a costly stall and clflush in
1173 * the hotpath of drawing the first frame.
1174 */
1175#define I915_CACHING_DISPLAY 2
David Howells718dced2012-10-04 18:21:50 +01001176
1177struct drm_i915_gem_caching {
1178 /**
1179 * Handle of the buffer to set/get the caching level of. */
1180 __u32 handle;
1181
1182 /**
 1183 * Caching level to apply or return value
 1184 *
 1185 * bits 0-15 are for generic caching control (i.e. the above defined
 1186 * values). bits 16-31 are reserved for platform-specific variations
1187 * (e.g. l3$ caching on gen7). */
1188 __u32 caching;
1189};
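/*
 * Illustrative sketch (not part of the UAPI): requesting LLC caching for an
 * object. Assumes an open DRM fd and a valid handle (bo_handle and drm_fd are
 * placeholders); the kernel may refuse modes the platform cannot support.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */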
1190
1191#define I915_TILING_NONE 0
1192#define I915_TILING_X 1
1193#define I915_TILING_Y 2
Chris Wilsondeeb1512016-08-05 10:14:22 +01001194#define I915_TILING_LAST I915_TILING_Y
David Howells718dced2012-10-04 18:21:50 +01001195
1196#define I915_BIT_6_SWIZZLE_NONE 0
1197#define I915_BIT_6_SWIZZLE_9 1
1198#define I915_BIT_6_SWIZZLE_9_10 2
1199#define I915_BIT_6_SWIZZLE_9_11 3
1200#define I915_BIT_6_SWIZZLE_9_10_11 4
1201/* Not seen by userland */
1202#define I915_BIT_6_SWIZZLE_UNKNOWN 5
1203/* Seen by userland. */
1204#define I915_BIT_6_SWIZZLE_9_17 6
1205#define I915_BIT_6_SWIZZLE_9_10_17 7
1206
1207struct drm_i915_gem_set_tiling {
1208 /** Handle of the buffer to have its tiling state updated */
1209 __u32 handle;
1210
1211 /**
1212 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1213 * I915_TILING_Y).
1214 *
1215 * This value is to be set on request, and will be updated by the
1216 * kernel on successful return with the actual chosen tiling layout.
1217 *
1218 * The tiling mode may be demoted to I915_TILING_NONE when the system
1219 * has bit 6 swizzling that can't be managed correctly by GEM.
1220 *
1221 * Buffer contents become undefined when changing tiling_mode.
1222 */
1223 __u32 tiling_mode;
1224
1225 /**
1226 * Stride in bytes for the object when in I915_TILING_X or
1227 * I915_TILING_Y.
1228 */
1229 __u32 stride;
1230
1231 /**
1232 * Returned address bit 6 swizzling required for CPU access through
1233 * mmap mapping.
1234 */
1235 __u32 swizzle_mode;
1236};
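/*
 * Illustrative sketch (not part of the UAPI): requesting X tiling with a
 * 4096-byte stride. Assumes an open DRM fd and a valid handle (bo_handle and
 * drm_fd are placeholders); on return the struct reports the tiling and
 * swizzle mode the kernel actually chose.
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = bo_handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
 */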
1237
1238struct drm_i915_gem_get_tiling {
1239 /** Handle of the buffer to get tiling state for. */
1240 __u32 handle;
1241
1242 /**
1243 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1244 * I915_TILING_Y).
1245 */
1246 __u32 tiling_mode;
1247
1248 /**
1249 * Returned address bit 6 swizzling required for CPU access through
1250 * mmap mapping.
1251 */
1252 __u32 swizzle_mode;
Chris Wilson70f2f5c2014-10-24 12:11:11 +01001253
1254 /**
1255 * Returned address bit 6 swizzling required for CPU access through
1256 * mmap mapping whilst bound.
1257 */
1258 __u32 phys_swizzle_mode;
David Howells718dced2012-10-04 18:21:50 +01001259};
1260
1261struct drm_i915_gem_get_aperture {
1262 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1263 __u64 aper_size;
1264
1265 /**
1266 * Available space in the aperture used by i915_gem_execbuffer, in
1267 * bytes
1268 */
1269 __u64 aper_available_size;
1270};
1271
1272struct drm_i915_get_pipe_from_crtc_id {
1273 /** ID of CRTC being requested **/
1274 __u32 crtc_id;
1275
1276 /** pipe of requested CRTC **/
1277 __u32 pipe;
1278};
1279
1280#define I915_MADV_WILLNEED 0
1281#define I915_MADV_DONTNEED 1
1282#define __I915_MADV_PURGED 2 /* internal state */
1283
1284struct drm_i915_gem_madvise {
1285 /** Handle of the buffer to change the backing store advice */
1286 __u32 handle;
1287
1288 /* Advice: either the buffer will be needed again in the near future,
 1289 * or won't be and could be discarded under memory pressure.
1290 */
1291 __u32 madv;
1292
1293 /** Whether the backing store still exists. */
1294 __u32 retained;
1295};
1296
1297/* flags */
1298#define I915_OVERLAY_TYPE_MASK 0xff
1299#define I915_OVERLAY_YUV_PLANAR 0x01
1300#define I915_OVERLAY_YUV_PACKED 0x02
1301#define I915_OVERLAY_RGB 0x03
1302
1303#define I915_OVERLAY_DEPTH_MASK 0xff00
1304#define I915_OVERLAY_RGB24 0x1000
1305#define I915_OVERLAY_RGB16 0x2000
1306#define I915_OVERLAY_RGB15 0x3000
1307#define I915_OVERLAY_YUV422 0x0100
1308#define I915_OVERLAY_YUV411 0x0200
1309#define I915_OVERLAY_YUV420 0x0300
1310#define I915_OVERLAY_YUV410 0x0400
1311
1312#define I915_OVERLAY_SWAP_MASK 0xff0000
1313#define I915_OVERLAY_NO_SWAP 0x000000
1314#define I915_OVERLAY_UV_SWAP 0x010000
1315#define I915_OVERLAY_Y_SWAP 0x020000
1316#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1317
1318#define I915_OVERLAY_FLAGS_MASK 0xff000000
1319#define I915_OVERLAY_ENABLE 0x01000000
1320
1321struct drm_intel_overlay_put_image {
1322 /* various flags and src format description */
1323 __u32 flags;
1324 /* source picture description */
1325 __u32 bo_handle;
1326 /* stride values and offsets are in bytes, buffer relative */
1327 __u16 stride_Y; /* stride for packed formats */
1328 __u16 stride_UV;
 1329 __u32 offset_Y; /* offset for packed formats */
1330 __u32 offset_U;
1331 __u32 offset_V;
1332 /* in pixels */
1333 __u16 src_width;
1334 __u16 src_height;
1335 /* to compensate the scaling factors for partially covered surfaces */
1336 __u16 src_scan_width;
1337 __u16 src_scan_height;
1338 /* output crtc description */
1339 __u32 crtc_id;
1340 __u16 dst_x;
1341 __u16 dst_y;
1342 __u16 dst_width;
1343 __u16 dst_height;
1344};
1345
1346/* flags */
1347#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1348#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1349#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1350struct drm_intel_overlay_attrs {
1351 __u32 flags;
1352 __u32 color_key;
1353 __s32 brightness;
1354 __u32 contrast;
1355 __u32 saturation;
1356 __u32 gamma0;
1357 __u32 gamma1;
1358 __u32 gamma2;
1359 __u32 gamma3;
1360 __u32 gamma4;
1361 __u32 gamma5;
1362};
1363
1364/*
1365 * Intel sprite handling
1366 *
1367 * Color keying works with a min/mask/max tuple. Both source and destination
1368 * color keying is allowed.
1369 *
1370 * Source keying:
1371 * Sprite pixels within the min & max values, masked against the color channels
1372 * specified in the mask field, will be transparent. All other pixels will
1373 * be displayed on top of the primary plane. For RGB surfaces, only the min
1374 * and mask fields will be used; ranged compares are not allowed.
1375 *
1376 * Destination keying:
1377 * Primary plane pixels that match the min value, masked against the color
1378 * channels specified in the mask field, will be replaced by corresponding
1379 * pixels from the sprite plane.
1380 *
1381 * Note that source & destination keying are exclusive; only one can be
1382 * active on a given plane.
1383 */
1384
1385#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1386 * flags==0 to disable colorkeying.
1387 */
1388#define I915_SET_COLORKEY_DESTINATION (1<<1)
1389#define I915_SET_COLORKEY_SOURCE (1<<2)
1390struct drm_intel_sprite_colorkey {
1391 __u32 plane_id;
1392 __u32 min_value;
1393 __u32 channel_mask;
1394 __u32 max_value;
1395 __u32 flags;
1396};
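/*
 * Illustrative sketch (not part of the uAPI): enabling source keying on a
 * sprite plane with DRM_IOCTL_I915_SET_SPRITE_COLORKEY (defined earlier in
 * this header). The plane id and key values below are made-up example
 * numbers; error handling is omitted.
 *
 *    struct drm_intel_sprite_colorkey ckey = {
 *        .plane_id = plane_id,
 *        .min_value = 0x00ff00ff,      // example key
 *        .max_value = 0x00ff00ff,      // ignored for RGB surfaces
 *        .channel_mask = 0x00ffffff,
 *        .flags = I915_SET_COLORKEY_SOURCE,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
 */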
1397
1398struct drm_i915_gem_wait {
1399 /** Handle of BO we shall wait on */
1400 __u32 bo_handle;
1401 __u32 flags;
1402 /** Number of nanoseconds to wait. Returns the time remaining. */
1403 __s64 timeout_ns;
1404};
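/*
 * Illustrative sketch (not part of the uAPI): waiting up to one second for
 * rendering to a buffer to complete, using DRM_IOCTL_I915_GEM_WAIT
 * (defined earlier in this header). Assumes an open DRM fd `fd`, a GEM
 * handle `handle`, and <errno.h>.
 *
 *    struct drm_i915_gem_wait wait = {
 *        .bo_handle = handle,
 *        .timeout_ns = 1000000000ll,
 *    };
 *
 *    if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *        printf("idle, %lld ns of the budget left\n",
 *               (long long)wait.timeout_ns);
 *    else if (errno == ETIME)
 *        printf("still busy after 1s\n");
 */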
1405
1406struct drm_i915_gem_context_create {
1407 /* output: id of the new context */
1408 __u32 ctx_id;
1409 __u32 pad;
1410};
1411
1412struct drm_i915_gem_context_destroy {
1413 __u32 ctx_id;
1414 __u32 pad;
1415};
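/*
 * Illustrative sketch (not part of the uAPI): creating a hardware context
 * and destroying it again, using DRM_IOCTL_I915_GEM_CONTEXT_CREATE and
 * DRM_IOCTL_I915_GEM_CONTEXT_DESTROY (both defined earlier in this
 * header). Assumes an open DRM fd `fd`; error handling is omitted.
 *
 *    struct drm_i915_gem_context_create create = { 0 };
 *    struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *    destroy.ctx_id = create.ctx_id;
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *
 * Between the two calls, create.ctx_id can be passed to execbuffer2 to run
 * work in the new context.
 */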
1416
1417struct drm_i915_reg_read {
1418 /*
1419 * Register offset.
1420 * For 64bit wide registers where the upper 32bits don't immediately
1421 * follow the lower 32bits, the offset of the lower 32bits must
1422 * be specified.
1423 */
1424 __u64 offset;
1425#define I915_REG_READ_8B_WA (1ul << 0)
1426
1427 __u64 val; /* Return value */
1428};
1429/* Known registers:
1430 *
1431 * Render engine timestamp - 0x2358 + 64bit - gen7+
1432 * - Note this register returns an invalid value if read using the default
1433 * single-instruction 8-byte read; to work around that, pass the
1434 * I915_REG_READ_8B_WA flag in the offset field.
1435 *
1436 */
1437
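/*
 * Illustrative sketch (not part of the uAPI): sampling the render engine
 * timestamp through DRM_IOCTL_I915_REG_READ (defined earlier in this
 * header), applying the 8-byte read workaround described above. Assumes an
 * open DRM fd `fd` and a gen7+ device; error handling is omitted.
 *
 *    struct drm_i915_reg_read reg = {
 *        .offset = 0x2358 | I915_REG_READ_8B_WA,
 *    };
 *
 *    if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *        printf("render timestamp: %llu\n", (unsigned long long)reg.val);
 */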
1438struct drm_i915_reset_stats {
1439 __u32 ctx_id;
1440 __u32 flags;
1441
1442 /* All resets since boot/module reload, for all contexts */
1443 __u32 reset_count;
1444
1445 /* Number of batches lost when active in GPU, for this context */
1446 __u32 batch_active;
1447
1448 /* Number of batches lost pending for execution, for this context */
1449 __u32 batch_pending;
1450
1451 __u32 pad;
1452};
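/*
 * Illustrative sketch (not part of the uAPI): checking how many resets a
 * context has been involved in, using DRM_IOCTL_I915_GET_RESET_STATS
 * (defined earlier in this header). Assumes an open DRM fd `fd` and a
 * context id `ctx_id` owned by the caller; error handling is omitted.
 *
 *    struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *    if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
 *        printf("global resets %u, active lost %u, pending lost %u\n",
 *               stats.reset_count, stats.batch_active,
 *               stats.batch_pending);
 */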
1453
1454struct drm_i915_gem_userptr {
1455 __u64 user_ptr;
1456 __u64 user_size;
1457 __u32 flags;
1458#define I915_USERPTR_READ_ONLY 0x1
1459#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1460 /**
1461 * Returned handle for the object.
1462 *
1463 * Object handles are nonzero.
1464 */
1465 __u32 handle;
1466};
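/*
 * Illustrative sketch (not part of the uAPI): wrapping anonymous,
 * page-aligned memory in a GEM object with DRM_IOCTL_I915_GEM_USERPTR
 * (defined earlier in this header). Assumes an open DRM fd `fd` and
 * <stdlib.h>/<stdint.h>; the 64KiB size is an arbitrary example and error
 * handling is omitted.
 *
 *    size_t size = 64 * 1024;
 *    void *ptr = aligned_alloc(4096, size);
 *    struct drm_i915_gem_userptr userptr = {
 *        .user_ptr = (__u64)(uintptr_t)ptr,
 *        .user_size = size,
 *    };
 *
 *    if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == 0)
 *        printf("new handle %u backed by the malloc'd pages\n",
 *               userptr.handle);
 */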
1467
1468struct drm_i915_gem_context_param {
1469 __u32 ctx_id;
1470 __u32 size;
1471 __u64 param;
1472#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1473#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1474#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1475#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1476#define I915_CONTEXT_PARAM_BANNABLE 0x5
1477#define I915_CONTEXT_PARAM_PRIORITY 0x6
1478#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1479#define I915_CONTEXT_DEFAULT_PRIORITY 0
1480#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1481 __u64 value;
1482};
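/*
 * Illustrative sketch (not part of the uAPI): raising a context's priority
 * with DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM (defined earlier in this
 * header). Boosting above the default typically requires CAP_SYS_NICE.
 * Assumes an open DRM fd `fd` and a context id `ctx_id`; error handling is
 * omitted.
 *
 *    struct drm_i915_gem_context_param p = {
 *        .ctx_id = ctx_id,
 *        .param = I915_CONTEXT_PARAM_PRIORITY,
 *        .value = 512,    // anywhere in [-1023, 1023]
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */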
1483
1484enum drm_i915_oa_format {
1485 I915_OA_FORMAT_A13 = 1, /* HSW only */
1486 I915_OA_FORMAT_A29, /* HSW only */
1487 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1488 I915_OA_FORMAT_B4_C8, /* HSW only */
1489 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1490 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1491 I915_OA_FORMAT_C4_B8, /* HSW+ */
1492
1493 /* Gen8+ */
1494 I915_OA_FORMAT_A12,
1495 I915_OA_FORMAT_A12_B8_C8,
1496 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1497
1498 I915_OA_FORMAT_MAX /* non-ABI */
1499};
1500
1501enum drm_i915_perf_property_id {
1502 /**
1503 * Open the stream for a specific context handle (as used with
1504 * execbuffer2). A stream opened for a specific context this way
1505 * won't typically require root privileges.
1506 */
1507 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1508
1509 /**
1510 * A value of 1 requests the inclusion of raw OA unit reports as
1511 * part of stream samples.
1512 */
1513 DRM_I915_PERF_PROP_SAMPLE_OA,
1514
1515 /**
1516 * The value specifies which set of OA unit metrics should
1517 * be configured, defining the contents of any OA unit reports.
1518 */
1519 DRM_I915_PERF_PROP_OA_METRICS_SET,
1520
1521 /**
1522 * The value specifies the size and layout of OA unit reports.
1523 */
1524 DRM_I915_PERF_PROP_OA_FORMAT,
1525
1526 /**
1527 * Specifying this property implicitly requests periodic OA unit
1528 * sampling and (at least on Haswell) the sampling period is derived
1529 * from this exponent as follows:
1530 *
1531 * 80ns * 2^(period_exponent + 1)
1532 */
1533 DRM_I915_PERF_PROP_OA_EXPONENT,
1534
1535 DRM_I915_PERF_PROP_MAX /* non-ABI */
1536};
1537
1538struct drm_i915_perf_open_param {
1539 __u32 flags;
1540#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
1541#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
1542#define I915_PERF_FLAG_DISABLED (1<<2)
1543
1544 /** The number of u64 (id, value) pairs */
1545 __u32 num_properties;
1546
1547 /**
1548 * Pointer to array of u64 (id, value) pairs configuring the stream
1549 * to open.
1550 */
1551 __u64 properties_ptr;
1552};
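/*
 * Illustrative sketch (not part of the uAPI): opening an OA perf stream
 * with DRM_IOCTL_I915_PERF_OPEN (defined earlier in this header). The
 * metrics set id is assumed to have been read from sysfs, the format is a
 * gen8+ example, and the exponent of 16 is only illustrative. On success
 * the ioctl returns a new file descriptor for the stream.
 *
 *    __u64 properties[] = {
 *        DRM_I915_PERF_PROP_SAMPLE_OA,      1,
 *        DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *        DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *        DRM_I915_PERF_PROP_OA_EXPONENT,    16,
 *    };
 *    struct drm_i915_perf_open_param param = {
 *        .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *        .num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *        .properties_ptr = (__u64)(uintptr_t)properties,
 *    };
 *
 *    int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */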
1553
1554/**
1555 * Enable data capture for a stream that was either opened in a disabled state
1556 * via I915_PERF_FLAG_DISABLED or was later disabled via
1557 * I915_PERF_IOCTL_DISABLE.
1558 *
1559 * It is intended to be cheaper to disable and enable a stream than it may be
1560 * to close and re-open a stream with the same configuration.
1561 *
1562 * It's undefined whether any pending data for the stream will be lost.
1563 */
1564#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
1565
1566/**
1567 * Disable data capture for a stream.
1568 *
1569 * It is an error to try to read a stream that is disabled.
1570 */
1571#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
1572
1573/**
1574 * Common to all i915 perf records
1575 */
1576struct drm_i915_perf_record_header {
1577 __u32 type;
1578 __u16 pad;
1579 __u16 size;
1580};
1581
1582enum drm_i915_perf_record_type {
1583
1584 /**
1585 * Samples are the workhorse record type whose contents are extensible
1586 * and defined when opening an i915 perf stream based on the given
1587 * properties.
1588 *
1589 * Boolean properties following the naming convention
1590 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1591 * every sample.
1592 *
1593 * The order of these sample properties given by userspace has no
1594 * effect on the ordering of data within a sample. The order is
1595 * documented here.
1596 *
1597 * struct {
1598 * struct drm_i915_perf_record_header header;
1599 *
1600 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1601 * };
1602 */
1603 DRM_I915_PERF_RECORD_SAMPLE = 1,
1604
1605 /*
1606 * Indicates that one or more OA reports were not written by the
1607 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1608 * command collides with periodic sampling - which would be more likely
1609 * at higher sampling frequencies.
1610 */
1611 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1612
1613 /**
1614 * An error occurred that resulted in all pending OA reports being lost.
1615 */
1616 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1617
1618 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1619};
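/*
 * Illustrative sketch (not part of the uAPI): walking the records returned
 * by read(2) on a perf stream fd. Each record starts with a
 * drm_i915_perf_record_header and header.size covers the whole record.
 * Assumes a stream fd `stream_fd` opened as above and <unistd.h>; error
 * handling is omitted.
 *
 *    char buf[16 * 4096];
 *    ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *    for (ssize_t off = 0; off < len; ) {
 *        struct drm_i915_perf_record_header *hdr =
 *            (struct drm_i915_perf_record_header *)(buf + off);
 *
 *        if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *            handle_sample(hdr + 1, hdr->size - sizeof(*hdr));
 *
 *        off += hdr->size;
 *    }
 *
 * handle_sample() is a hypothetical callback; with only SAMPLE_OA
 * requested, the payload after the header is the raw OA report.
 */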
1620
1621/**
1622 * Structure to upload perf dynamic configuration into the kernel.
1623 */
1624struct drm_i915_perf_oa_config {
1625 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1626 char uuid[36];
1627
1628 __u32 n_mux_regs;
1629 __u32 n_boolean_regs;
1630 __u32 n_flex_regs;
1631
1632 /*
1633 * These fields are pointers to tuples of u32 values (register address,
1634 * value). For example the expected length of the buffer pointed by
1635 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1636 */
1637 __u64 mux_regs_ptr;
1638 __u64 boolean_regs_ptr;
1639 __u64 flex_regs_ptr;
1640};
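/*
 * Illustrative sketch (not part of the uAPI): uploading a dynamic OA
 * configuration with DRM_IOCTL_I915_PERF_ADD_CONFIG (defined earlier in
 * this header). The uuid and the single mux register write below are
 * made-up placeholders; real configurations are generated by the Intel
 * metrics tooling. Error handling is omitted.
 *
 *    __u32 mux_regs[] = { 0x9888, 0x15010000 };    // (address, value)
 *    struct drm_i915_perf_oa_config config = {
 *        .uuid = "01234567-0123-0123-0123-0123456789ab",
 *        .n_mux_regs = 1,
 *        .mux_regs_ptr = (__u64)(uintptr_t)mux_regs,
 *    };
 *
 *    int config_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * On success the ioctl is expected to return the id of the new config,
 * which can then be used as an OA metrics set.
 */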
1641
1642struct drm_i915_query_item {
1643 __u64 query_id;
1644#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1645
1646 /*
1647 * When set to zero by userspace, this is filled with the size of the
1648 * data to be written at the data_ptr pointer. The kernel sets this
1649 * value to a negative value to signal an error on a particular query
1650 * item.
1651 */
1652 __s32 length;
1653
1654 /*
1655 * Unused for now. Must be cleared to zero.
1656 */
1657 __u32 flags;
1658
1659 /*
1660 * Data will be written at the location pointed by data_ptr when the
1661 * value of length matches the length of the data to be written by the
1662 * kernel.
1663 */
1664 __u64 data_ptr;
1665};
1666
1667struct drm_i915_query {
1668 __u32 num_items;
1669
1670 /*
1671 * Unused for now. Must be cleared to zero.
1672 */
1673 __u32 flags;
1674
1675 /*
1676 * This points to an array of num_items drm_i915_query_item structures.
1677 */
1678 __u64 items_ptr;
1679};
1680
1681/*
1682 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
1683 *
1684 * data: contains three pieces of information:
1685 *
1686 * - the slice mask with one bit per slice telling whether a slice is
1687 * available. The availability of slice X can be queried with the following
1688 * formula:
1689 *
1690 * (data[X / 8] >> (X % 8)) & 1
1691 *
1692 * - the subslice mask for each slice with one bit per subslice telling
1693 * whether a subslice is available. The availability of subslice Y in slice
1694 * X can be queried with the following formula:
1695 *
1696 * (data[subslice_offset +
1697 * X * subslice_stride +
1698 * Y / 8] >> (Y % 8)) & 1
1699 *
1700 * - the EU mask for each subslice in each slice with one bit per EU telling
1701 * whether an EU is available. The availability of EU Z in subslice Y in
1702 * slice X can be queried with the following formula:
1703 *
1704 * (data[eu_offset +
1705 * (X * max_subslices + Y) * eu_stride +
1706 * Z / 8] >> (Z % 8)) & 1
1707 */
1708struct drm_i915_query_topology_info {
1709 /*
1710 * Unused for now. Must be cleared to zero.
1711 */
1712 __u16 flags;
1713
1714 __u16 max_slices;
1715 __u16 max_subslices;
1716 __u16 max_eus_per_subslice;
1717
1718 /*
1719 * Offset in data[] at which the subslice masks are stored.
1720 */
1721 __u16 subslice_offset;
1722
1723 /*
1724 * Stride at which each of the subslice masks for each slice are
1725 * stored.
1726 */
1727 __u16 subslice_stride;
1728
1729 /*
1730 * Offset in data[] at which the EU masks are stored.
1731 */
1732 __u16 eu_offset;
1733
1734 /*
1735 * Stride at which each of the EU masks for each subslice are stored.
1736 */
1737 __u16 eu_stride;
1738
1739 __u8 data[];
1740};
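/*
 * Illustrative sketch (not part of the uAPI): the usual two-pass use of
 * DRM_IOCTL_I915_QUERY (defined earlier in this header) for
 * DRM_I915_QUERY_TOPOLOGY_INFO: first ask for the size, then for the data,
 * then apply the slice formula documented above. Assumes an open DRM fd
 * `fd` and <stdlib.h>; error handling is omitted.
 *
 *    struct drm_i915_query_item item = {
 *        .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *    };
 *    struct drm_i915_query query = {
 *        .num_items = 1,
 *        .items_ptr = (__u64)(uintptr_t)&item,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_I915_QUERY, &query);    // fills item.length
 *
 *    struct drm_i915_query_topology_info *topo = malloc(item.length);
 *    item.data_ptr = (__u64)(uintptr_t)topo;
 *    ioctl(fd, DRM_IOCTL_I915_QUERY, &query);    // fills the buffer
 *
 *    for (int s = 0; s < topo->max_slices; s++)
 *        printf("slice %d: %savailable\n", s,
 *               (topo->data[s / 8] >> (s % 8)) & 1 ? "" : "un");
 */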
1741
1742#if defined(__cplusplus)
1743}
1744#endif
1745
1746#endif /* _UAPI_I915_DRM_H_ */