#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
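
/*
 * A sketch of how the reservation is used (illustrative; the real helper,
 * __intel_ring_space(), lives in intel_ringbuffer.c): available space is
 * computed so that the tail always stays I915_RING_FREE_SPACE bytes short
 * of the head, modulo the ring size, which keeps head and tail out of the
 * forbidden same-cacheline ordering quoted above:
 *
 *      space = head - (tail + I915_RING_FREE_SPACE);
 *      if (space < 0)
 *              space += size;
 */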

struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (__ring)->id))
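
/*
 * Worked example (cross-checked against the signal/wait tables further
 * down): the render ring signalling the first video ring gives
 * GEN8_SIGNAL_OFFSET(rcs, VCS) = base + (0 * 5 * 8) + (8 * 1) = base + 0x08,
 * and the waiter's GEN8_WAIT_OFFSET(vcs, RCS) = base + (0 * 5 * 8) + (8 * 1)
 * resolves to the same slot, so the signaller writes where the waiter polls.
 */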

#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)

enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;

        struct intel_engine_cs *ring;

        /*
         * FIXME: This backpointer is an artifact of the history of how the
         * execlist patches came into being. It will get removed once the basic
         * code has landed.
         */
        struct intel_context *FIXME_lrc_ctx;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0x0,
                VCS,
                BCS,
                VECS,
                VCS2
        } id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;

        struct intel_hw_status_page status_page;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask;   /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct intel_engine_cs *ring,
                            struct intel_context *ctx);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct intel_engine_cs *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct intel_engine_cs *ring);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                                   u64 offset, u32 length,
                                   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to     signal to     signal to     signal to
         *          RCS            VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *         sync from     sync from     sync from     sync from     sync from
         *          RCS            VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                u32 signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct intel_engine_cs *ring,
                               struct intel_engine_cs *to,
                               u32 seqno);
                int (*signal)(struct intel_engine_cs *signaller,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct intel_ringbuffer *ringbuf);
        int (*emit_flush)(struct intel_ringbuffer *ringbuf,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                             u64 offset, unsigned flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *outstanding_lazy_request;
        bool gpu_caches_dirty;
        bool fbc_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *default_context;
        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const u32 *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers, but
         * only from the DRM master.
         */
        const u32 *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length field
         * encoding for the command (i.e. certain opcode ranges use certain bits
         * to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
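
        /*
         * Illustrative sketch only (the real per-ring hooks live in the
         * command parser, not in this header): for a command client that
         * keeps its length in bits 5:0 of the header, such a hook would
         * return 0x3F for those opcodes and 0 for anything unrecognized.
         */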
};

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}
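
/*
 * Worked example (follows from the mapping above): with ring == vcs (id 1)
 * and other == rcs (id 0), idx = (0 - 1) - 1 = -2, which wraps to
 * -2 + I915_NUM_RINGS = 3, matching "vcs -> ... 3 = rcs".
 */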

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
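
/*
 * A sketch of typical use (assumed from the layout above, not mandated by
 * this header): a per-request seqno lives in the driver-usage area and is
 * read back with the helper, e.g.
 *
 *      seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */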
Chris Wilson311bd682011-01-13 19:06:50 +0000384
Thomas Daniel7ba717c2014-11-13 10:28:56 +0000385void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
386int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
387 struct intel_ringbuffer *ringbuf);
Oscar Mateo84c23772014-07-24 17:04:15 +0100388void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
389int intel_alloc_ringbuffer_obj(struct drm_device *dev,
390 struct intel_ringbuffer *ringbuf);
391
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100392void intel_stop_ring_buffer(struct intel_engine_cs *ring);
393void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
Ben Widawsky96f298a2011-03-19 18:14:27 -0700394
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100395int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
396int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
397static inline void intel_ring_emit(struct intel_engine_cs *ring,
Chris Wilson78501ea2010-10-27 12:18:21 +0100398 u32 data)
Chris Wilsone898cd22010-08-04 15:18:14 +0100399{
Oscar Mateo93b0a4e2014-05-22 14:13:36 +0100400 struct intel_ringbuffer *ringbuf = ring->buffer;
401 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
402 ringbuf->tail += 4;
Chris Wilsone898cd22010-08-04 15:18:14 +0100403}
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100404static inline void intel_ring_advance(struct intel_engine_cs *ring)
Chris Wilson09246732013-08-10 22:16:32 +0100405{
Oscar Mateo93b0a4e2014-05-22 14:13:36 +0100406 struct intel_ringbuffer *ringbuf = ring->buffer;
407 ringbuf->tail &= ringbuf->size - 1;
Chris Wilson09246732013-08-10 22:16:32 +0100408}
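
/*
 * Minimal usage sketch (illustrative; MI_NOOP is defined in i915_reg.h):
 * reserve space, emit dwords, then advance:
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */
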
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
        BUG_ON(ring->outstanding_lazy_request == NULL);
        return ring->outstanding_lazy_request;
}

#endif /* _INTEL_RINGBUFFER_H_ */