#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
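
/*
 * A minimal sketch of the free-space accounting this reservation feeds into
 * (illustrative only; the driver's real helper lives in intel_ringbuffer.c
 * and may differ in detail):
 *
 *	static inline int ring_space(struct intel_ringbuffer *ringbuf)
 *	{
 *		int space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
 *		if (space < 0)
 *			space += ringbuf->size;
 *		return space;
 *	}
 *
 * Always keeping I915_RING_FREE_SPACE bytes in hand ensures the tail never
 * advances onto the cacheline holding the head, which is the situation the
 * quoted BSpec restriction forbids.
 */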

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
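
/*
 * Illustrative sketch (an assumption about the caller, not verbatim driver
 * code) of how last_retired_head is consumed when waiting for ring space:
 *
 *	if (ringbuf->last_retired_head != -1) {
 *		ringbuf->head = ringbuf->last_retired_head;
 *		ringbuf->last_retired_head = -1;
 *		ringbuf->space = ring_space(ringbuf);
 *	}
 */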

struct intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;

	struct intel_hw_status_page status_page;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init)(struct intel_engine_cs *ring);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
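	/*
	 * Example use of the lazy_coherency flag above (a sketch of the
	 * callers' convention, not a guarantee): request retirement can
	 * tolerate a stale value, ring->get_seqno(ring, true), whereas
	 * hangcheck forces a coherent read, ring->get_seqno(ring, false),
	 * before concluding that the GPU has stopped making progress.
	 */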
	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_engine_cs *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32		wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32		signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
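
	/*
	 * Typical inter-ring synchronisation (a sketch; assumed to mirror
	 * the i915_gem_object_sync() call site):
	 *
	 *	ret = to->semaphore.sync_to(to, from, seqno);
	 *
	 * makes @to wait until @from has passed @seqno before it executes
	 * any further commands.
	 */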

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
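
/*
 * Worked example of the length-mask convention (a sketch of the parser's
 * use; LENGTH_BIAS and the exact error handling are assumptions based on
 * i915_cmd_parser.c). A mask of 0 marks the command unrecognized/invalid:
 *
 *	u32 mask = ring->get_cmd_length_mask(cmd_header);
 *	if (mask == 0)
 *		return -EINVAL;
 *	u32 length = (cmd_header & mask) + LENGTH_BIAS;
 *
 * where LENGTH_BIAS accounts for length fields that encode "total dwords
 * minus two".
 */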

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->buffer && ring->buffer->obj;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
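
/*
 * Worked example (with I915_NUM_RINGS == 5; the pointer subtraction relies
 * on the engines living in one contiguous array): from the render ring,
 * intel_ring_sync_index(rcs, vcs) = (1 - 0) - 1 = 0, while from the video
 * ring, intel_ring_sync_index(vcs, rcs) = (0 - 1) - 1 = -2, which wraps to
 * 3. Each engine thus sees every other engine at a stable slot 0..3 of its
 * semaphore.sync_seqno[] array.
 */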

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
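
/*
 * Example (sketch): a ring's breadcrumb seqno is typically recovered with a
 * status-page read of the dword that MI_STORE_DWORD_INDEX wrote at request
 * emission time:
 *
 *	seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */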

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
void __intel_ring_advance(struct intel_engine_cs *ring);
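
/*
 * Typical emission pattern (an illustrative sketch of how the accessors
 * above fit together; intel_ring_begin() takes the number of dwords):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */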
Chris Wilson09246732013-08-10 22:16:32 +0100306
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100307int __must_check intel_ring_idle(struct intel_engine_cs *ring);
308void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
309int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
310int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800311
Xiang, Haihao5c1143b2010-09-16 10:43:11 +0800312int intel_init_render_ring_buffer(struct drm_device *dev);
313int intel_init_bsd_ring_buffer(struct drm_device *dev);
Zhao Yakui845f74a2014-04-17 10:37:37 +0800314int intel_init_bsd2_ring_buffer(struct drm_device *dev);
Chris Wilson549f7362010-10-19 11:19:32 +0100315int intel_init_blt_ring_buffer(struct drm_device *dev);
Ben Widawsky9a8a2212013-05-28 19:22:23 -0700316int intel_init_vebox_ring_buffer(struct drm_device *dev);
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800317
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100318u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
319void intel_ring_setup_status_page(struct intel_engine_cs *ring);
Daniel Vetter79f321b2010-09-24 21:20:10 +0200320
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100321static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
Chris Wilsona71d8d92012-02-15 11:25:36 +0000322{
Oscar Mateoee1b1e52014-05-22 14:13:35 +0100323 return ring->buffer->tail;
Chris Wilsona71d8d92012-02-15 11:25:36 +0000324}
325
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100326static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
Chris Wilson9d7730912012-11-27 16:22:52 +0000327{
Chris Wilson18235212013-09-04 10:45:51 +0100328 BUG_ON(ring->outstanding_lazy_seqno == 0);
329 return ring->outstanding_lazy_seqno;
Chris Wilson9d7730912012-11-27 16:22:52 +0000330}
331
Oscar Mateoa4872ba2014-05-22 14:13:33 +0100332static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
Chris Wilsondb53a302011-02-03 11:57:46 +0000333{
334 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
335 ring->trace_irq_seqno = seqno;
336}
337
Chris Wilsone8616b62011-01-20 09:57:11 +0000338/* DRI warts */
339int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
340
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800341#endif /* _INTEL_RINGBUFFER_H_ */