/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things to the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
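
/*
 * Illustrative sketch (not driver code) of the pairing rule described
 * above. Assuming a queue [A1, A2, B1, C1] (head on the left, letters
 * naming contexts), the unqueue logic coalesces leading same-context
 * requests and submits at most two distinct contexts to the ELSP:
 *
 *	queue: A1 A2 B1 C1
 *	  -> drop A1 in favour of A2 (same ctx, newest tail wins)
 *	  -> submit ELSP pair {A2, B1}; C1 stays queued
 *
 *	queue: A1 (nothing else pending)
 *	  -> submit ELSP pair {A1, NULL} (second element left empty)
 *
 * See execlists_context_unqueue() below for the real implementation.
 */
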
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

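/*
 * Informal summary of the sanitize rules above (module parameter and
 * platform on the left, effective value on the right); a quick reference
 * distilled from the code, not an authoritative table:
 *
 *	vGPU with logical ring contexts		-> 1 (forced on)
 *	gen9+					-> 1 (forced on)
 *	i915.enable_execlists == 0		-> 0 (forced off)
 *	LRCs + PPGTT + i915.use_mmio_flip >= 0	-> 1
 *	anything else				-> 0
 */
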
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
 *
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits  0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *    bits 32-52: ctx ID, a globally unique tag
 *    bits 53-54: mbz, reserved for use by hardware
 *    bits 55-63: group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;			/* bits 3-4  */
	desc |= engine->ctx_desc_template;		/* bits 0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}

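/*
 * Worked example for the descriptor layout above (illustrative values
 * only, not taken from real hardware): with ctx_desc_template ==
 * (GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE), the LRC state page at GGTT
 * offset 0x00042000 and hw_id == 5, the cached descriptor would be
 *
 *	desc = 0x101 | 0x00042000 | ((u64)5 << GEN8_CTX_ID_SHIFT)
 *	     = 0x0000000500042101
 *
 * i.e. flags in bits 0-11, the LRCA in bits 12-31 and the context ID in
 * bits 32-52, matching the layout documented above.
 */
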
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static inline void execlists_context_status_change(
		struct drm_i915_gem_request *rq,
		unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_unreference(req0);
			req0 = cursor;
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
				/*
				 * req0 (after merge) ctx requires single
				 * submission, stop picking
				 */
				if (req0->ctx->execlists_force_single_submission)
					break;
				/*
				 * req0 ctx doesn't require single submission,
				 * but next req ctx requires it, stop picking
				 */
				if (cursor->ctx->execlists_force_single_submission)
					break;
			}
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);

	if (req1)
		execlists_context_status_change(req1,
						INTEL_CONTEXT_SCHEDULE_IN);

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		struct intel_ringbuffer *ringbuf;

		ringbuf = req0->ctx->engine[engine->id].ringbuf;
		req0->tail += 8;
		req0->tail &= ringbuf->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

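/*
 * Note on the WaIdleLiteRestore tail bump in execlists_context_unqueue()
 * above (numbers come from the request emission code, not from hardware
 * documentation): every request is closed with two padding MI_NOOPs, i.e.
 * 2 dwords == 8 bytes, so on resubmission the context tail is advanced
 * over that padding:
 *
 *	req0->tail = (req0->tail + 8) & (ringbuf->size - 1);
 *
 * which guarantees the hardware never samples HEAD == TAIL during the
 * lite restore.
 */
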
static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);

	list_del(&head_req->execlist_link);
	i915_gem_request_unreference(head_req);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @data: engine pointer, packed as the tasklet's unsigned long data
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

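/*
 * Illustrative CSB pointer arithmetic for the handler above, assuming
 * GEN8_CSB_ENTRIES == 6, next_context_status_buffer == 4 and a hardware
 * write pointer of 1 (values chosen purely for the example):
 *
 *	read_pointer = 4, write_pointer = 1
 *	read_pointer > write_pointer  ->  write_pointer += 6 (= 7)
 *	loop consumes entries (++read_pointer % 6): 5, 0, 1
 *	next_context_status_buffer = 7 % 6 = 1
 *
 * The wrap is handled by extending the write pointer past the ring size
 * rather than by wrapping the read pointer inside the loop condition.
 */
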
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +0000615static void execlists_context_queue(struct drm_i915_gem_request *request)
Michel Thierryacdd8842014-07-24 17:04:38 +0100616{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000617 struct intel_engine_cs *engine = request->engine;
Nick Hoath6d3d8272015-01-15 13:10:39 +0000618 struct drm_i915_gem_request *cursor;
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100619 int num_elements = 0;
Michel Thierryacdd8842014-07-24 17:04:38 +0100620
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +0100621 spin_lock_bh(&engine->execlist_lock);
Michel Thierryacdd8842014-07-24 17:04:38 +0100622
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000623 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100624 if (++num_elements > 2)
625 break;
626
627 if (num_elements > 2) {
Nick Hoath6d3d8272015-01-15 13:10:39 +0000628 struct drm_i915_gem_request *tail_req;
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100629
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000630 tail_req = list_last_entry(&engine->execlist_queue,
Nick Hoath6d3d8272015-01-15 13:10:39 +0000631 struct drm_i915_gem_request,
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100632 execlist_link);
633
John Harrisonae707972015-05-29 17:44:14 +0100634 if (request->ctx == tail_req->ctx) {
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100635 WARN(tail_req->elsp_submitted != 0,
Thomas Daniel7ba717c2014-11-13 10:28:56 +0000636 "More than 2 already-submitted reqs queued\n");
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100637 list_del(&tail_req->execlist_link);
638 i915_gem_request_unreference(tail_req);
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100639 }
640 }
641
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100642 i915_gem_request_reference(request);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000643 list_add_tail(&request->execlist_link, &engine->execlist_queue);
Tvrtko Ursulina3d12762016-04-28 09:56:57 +0100644 request->ctx_hw_id = request->ctx->hw_id;
Oscar Mateof1ad5a12014-07-24 17:04:41 +0100645 if (num_elements == 0)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000646 execlists_context_unqueue(engine);
Michel Thierryacdd8842014-07-24 17:04:38 +0100647
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +0100648 spin_unlock_bh(&engine->execlist_lock);
Michel Thierryacdd8842014-07-24 17:04:38 +0100649}
650
John Harrison2f200552015-05-29 17:43:53 +0100651static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100652{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000653 struct intel_engine_cs *engine = req->engine;
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100654 uint32_t flush_domains;
655 int ret;
656
657 flush_domains = 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000658 if (engine->gpu_caches_dirty)
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100659 flush_domains = I915_GEM_GPU_DOMAINS;
660
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000661 ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100662 if (ret)
663 return ret;
664
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000665 engine->gpu_caches_dirty = false;
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100666 return 0;
667}
668
John Harrison535fbe82015-05-29 17:43:32 +0100669static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100670 struct list_head *vmas)
671{
Tvrtko Ursulin666796d2016-03-16 11:00:39 +0000672 const unsigned other_rings = ~intel_engine_flag(req->engine);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100673 struct i915_vma *vma;
674 uint32_t flush_domains = 0;
675 bool flush_chipset = false;
676 int ret;
677
678 list_for_each_entry(vma, vmas, exec_list) {
679 struct drm_i915_gem_object *obj = vma->obj;
680
Chris Wilson03ade512015-04-27 13:41:18 +0100681 if (obj->active & other_rings) {
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000682 ret = i915_gem_object_sync(obj, req->engine, &req);
Chris Wilson03ade512015-04-27 13:41:18 +0100683 if (ret)
684 return ret;
685 }
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100686
687 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
688 flush_chipset |= i915_gem_clflush_object(obj, false);
689
690 flush_domains |= obj->base.write_domain;
691 }
692
693 if (flush_domains & I915_GEM_DOMAIN_GTT)
694 wmb();
695
696 /* Unconditionally invalidate gpu caches and ensure that we do flush
697 * any residual writes from the previous batch.
698 */
John Harrison2f200552015-05-29 17:43:53 +0100699 return logical_ring_invalidate_all_caches(req);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100700}
701
John Harrison40e895c2015-05-29 17:43:26 +0100702int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
John Harrisonbc0dce32015-03-19 12:30:07 +0000703{
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100704 struct intel_engine_cs *engine = request->engine;
Chris Wilson9021ad02016-05-24 14:53:37 +0100705 struct intel_context *ce = &request->ctx->engine[engine->id];
Chris Wilsonbfa01202016-04-28 09:56:48 +0100706 int ret;
John Harrisonbc0dce32015-03-19 12:30:07 +0000707
Chris Wilson63103462016-04-28 09:56:49 +0100708 /* Flush enough space to reduce the likelihood of waiting after
709 * we start building the request - in which case we will just
710 * have to repeat work.
711 */
Chris Wilson0e93cdd2016-04-29 09:07:06 +0100712 request->reserved_space += EXECLISTS_REQUEST_SIZE;
Chris Wilson63103462016-04-28 09:56:49 +0100713
Chris Wilson9021ad02016-05-24 14:53:37 +0100714 if (!ce->state) {
Chris Wilson978f1e02016-04-28 09:56:54 +0100715 ret = execlists_context_deferred_alloc(request->ctx, engine);
716 if (ret)
717 return ret;
718 }
719
Chris Wilson9021ad02016-05-24 14:53:37 +0100720 request->ringbuf = ce->ringbuf;
Mika Kuoppalaf3cc01f2015-07-06 11:08:30 +0300721
Alex Daia7e02192015-12-16 11:45:55 -0800722 if (i915.enable_guc_submission) {
723 /*
724 * Check that the GuC has space for the request before
725 * going any further, as the i915_add_request() call
726 * later on mustn't fail ...
727 */
Dave Gordon7c2c2702016-05-13 15:36:32 +0100728 ret = i915_guc_wq_check_space(request);
Alex Daia7e02192015-12-16 11:45:55 -0800729 if (ret)
730 return ret;
731 }
732
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100733 ret = intel_lr_context_pin(request->ctx, engine);
734 if (ret)
735 return ret;
Dave Gordone28e4042016-01-19 19:02:55 +0000736
Chris Wilsonbfa01202016-04-28 09:56:48 +0100737 ret = intel_ring_begin(request, 0);
738 if (ret)
739 goto err_unpin;
740
Chris Wilson9021ad02016-05-24 14:53:37 +0100741 if (!ce->initialised) {
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100742 ret = engine->init_context(request);
743 if (ret)
744 goto err_unpin;
745
Chris Wilson9021ad02016-05-24 14:53:37 +0100746 ce->initialised = true;
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100747 }
748
749 /* Note that after this point, we have committed to using
750 * this request as it is being used to both track the
751 * state of engine initialisation and liveness of the
752 * golden renderstate above. Think twice before you try
753 * to cancel/unwind this request now.
754 */
755
Chris Wilson0e93cdd2016-04-29 09:07:06 +0100756 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
Chris Wilsonbfa01202016-04-28 09:56:48 +0100757 return 0;
758
759err_unpin:
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100760 intel_lr_context_unpin(request->ctx, engine);
Dave Gordone28e4042016-01-19 19:02:55 +0000761 return ret;
John Harrisonbc0dce32015-03-19 12:30:07 +0000762}
763
John Harrisonbc0dce32015-03-19 12:30:07 +0000764/*
765 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
John Harrisonae707972015-05-29 17:44:14 +0100766 * @request: Request to advance the logical ringbuffer of.
John Harrisonbc0dce32015-03-19 12:30:07 +0000767 *
768 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
769 * really happens during submission is that the context and current tail will be placed
770 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
771 * point, the tail *inside* the context is updated and the ELSP written to.
772 */
Chris Wilson7c17d372016-01-20 15:43:35 +0200773static int
John Harrisonae707972015-05-29 17:44:14 +0100774intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
John Harrisonbc0dce32015-03-19 12:30:07 +0000775{
Chris Wilson7c17d372016-01-20 15:43:35 +0200776 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000777 struct intel_engine_cs *engine = request->engine;
John Harrisonbc0dce32015-03-19 12:30:07 +0000778
Chris Wilson7c17d372016-01-20 15:43:35 +0200779 intel_logical_ring_advance(ringbuf);
780 request->tail = ringbuf->tail;
John Harrisonbc0dce32015-03-19 12:30:07 +0000781
Chris Wilson7c17d372016-01-20 15:43:35 +0200782 /*
783 * Here we add two extra NOOPs as padding to avoid
784 * lite restore of a context with HEAD==TAIL.
785 *
786 * Caller must reserve WA_TAIL_DWORDS for us!
787 */
788 intel_logical_ring_emit(ringbuf, MI_NOOP);
789 intel_logical_ring_emit(ringbuf, MI_NOOP);
790 intel_logical_ring_advance(ringbuf);
Alex Daid1675192015-08-12 15:43:43 +0100791
Chris Wilsona16a4052016-04-28 09:56:56 +0100792 /* We keep the previous context alive until we retire the following
793 * request. This ensures that any the context object is still pinned
794 * for any residual writes the HW makes into it on the context switch
795 * into the next object following the breadcrumb. Otherwise, we may
796 * retire the context too early.
797 */
798 request->previous_context = engine->last_context;
799 engine->last_context = request->ctx;
Tvrtko Ursulinf4e2dec2016-01-28 10:29:57 +0000800
Dave Gordon7c2c2702016-05-13 15:36:32 +0100801 if (i915.enable_guc_submission)
802 i915_guc_submit(request);
Alex Daid1675192015-08-12 15:43:43 +0100803 else
804 execlists_context_queue(request);
Chris Wilson7c17d372016-01-20 15:43:35 +0200805
806 return 0;
John Harrisonbc0dce32015-03-19 12:30:07 +0000807}
808
Oscar Mateo73e4d072014-07-24 17:04:48 +0100809/**
810 * execlists_submission() - submit a batchbuffer for execution, Execlists style
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +0100811 * @params: execbuffer call parameters.
Oscar Mateo73e4d072014-07-24 17:04:48 +0100812 * @args: execbuffer call arguments.
813 * @vmas: list of vmas.
Oscar Mateo73e4d072014-07-24 17:04:48 +0100814 *
815 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
816 * away the submission details of the execbuffer ioctl call.
817 *
818 * Return: non-zero if the submission fails.
819 */
John Harrison5f19e2b2015-05-29 17:43:27 +0100820int intel_execlists_submission(struct i915_execbuffer_params *params,
Oscar Mateo454afeb2014-07-24 17:04:22 +0100821 struct drm_i915_gem_execbuffer2 *args,
John Harrison5f19e2b2015-05-29 17:43:27 +0100822 struct list_head *vmas)
Oscar Mateo454afeb2014-07-24 17:04:22 +0100823{
John Harrison5f19e2b2015-05-29 17:43:27 +0100824 struct drm_device *dev = params->dev;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000825 struct intel_engine_cs *engine = params->engine;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100826 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000827 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
John Harrison5f19e2b2015-05-29 17:43:27 +0100828 u64 exec_start;
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100829 int instp_mode;
830 u32 instp_mask;
831 int ret;
832
833 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
834 instp_mask = I915_EXEC_CONSTANTS_MASK;
835 switch (instp_mode) {
836 case I915_EXEC_CONSTANTS_REL_GENERAL:
837 case I915_EXEC_CONSTANTS_ABSOLUTE:
838 case I915_EXEC_CONSTANTS_REL_SURFACE:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000839 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100840 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
841 return -EINVAL;
842 }
843
844 if (instp_mode != dev_priv->relative_constants_mode) {
845 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
846 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
847 return -EINVAL;
848 }
849
850 /* The HW changed the meaning on this bit on gen6 */
851 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
852 }
853 break;
854 default:
855 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
856 return -EINVAL;
857 }
858
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100859 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
860 DRM_DEBUG("sol reset is gen7 only\n");
861 return -EINVAL;
862 }
863
John Harrison535fbe82015-05-29 17:43:32 +0100864 ret = execlists_move_to_gpu(params->request, vmas);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100865 if (ret)
866 return ret;
867
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000868 if (engine == &dev_priv->engine[RCS] &&
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100869 instp_mode != dev_priv->relative_constants_mode) {
Chris Wilson987046a2016-04-28 09:56:46 +0100870 ret = intel_ring_begin(params->request, 4);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100871 if (ret)
872 return ret;
873
874 intel_logical_ring_emit(ringbuf, MI_NOOP);
875 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
Ville Syrjäläf92a9162015-11-04 23:20:07 +0200876 intel_logical_ring_emit_reg(ringbuf, INSTPM);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100877 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
878 intel_logical_ring_advance(ringbuf);
879
880 dev_priv->relative_constants_mode = instp_mode;
881 }
882
John Harrison5f19e2b2015-05-29 17:43:27 +0100883 exec_start = params->batch_obj_vm_offset +
884 args->batch_start_offset;
885
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000886 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100887 if (ret)
888 return ret;
889
John Harrison95c24162015-05-29 17:43:31 +0100890 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
John Harrison5e4be7b2015-02-13 11:48:11 +0000891
John Harrison8a8edb52015-05-29 17:43:33 +0100892 i915_gem_execbuffer_move_to_active(vmas, params->request);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100893
Oscar Mateo454afeb2014-07-24 17:04:22 +0100894 return 0;
895}
896
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100897void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000898{
Nick Hoath6d3d8272015-01-15 13:10:39 +0000899 struct drm_i915_gem_request *req, *tmp;
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100900 LIST_HEAD(cancel_list);
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000901
Chris Wilsonc0336662016-05-06 15:40:21 +0100902 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000903
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +0100904 spin_lock_bh(&engine->execlist_lock);
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100905 list_replace_init(&engine->execlist_queue, &cancel_list);
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +0100906 spin_unlock_bh(&engine->execlist_lock);
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000907
Tvrtko Ursuline39d42f2016-04-28 09:56:58 +0100908 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000909 list_del(&req->execlist_link);
Nick Hoathf8210792015-01-29 16:55:07 +0000910 i915_gem_request_unreference(req);
Thomas Danielc86ee3a92014-11-13 10:27:05 +0000911 }
912}
913
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000914void intel_logical_ring_stop(struct intel_engine_cs *engine)
Oscar Mateo454afeb2014-07-24 17:04:22 +0100915{
Chris Wilsonc0336662016-05-06 15:40:21 +0100916 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo9832b9d2014-07-24 17:04:30 +0100917 int ret;
918
Tvrtko Ursulin117897f2016-03-16 11:00:40 +0000919 if (!intel_engine_initialized(engine))
Oscar Mateo9832b9d2014-07-24 17:04:30 +0100920 return;
921
Tvrtko Ursulin666796d2016-03-16 11:00:39 +0000922 ret = intel_engine_idle(engine);
Chris Wilsonf4457ae2016-04-13 17:35:08 +0100923 if (ret)
Oscar Mateo9832b9d2014-07-24 17:04:30 +0100924 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000925 engine->name, ret);
Oscar Mateo9832b9d2014-07-24 17:04:30 +0100926
927 /* TODO: Is this correct with Execlists enabled? */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000928 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
Chris Wilson3e7941a2016-06-30 15:33:23 +0100929 if (intel_wait_for_register(dev_priv,
930 RING_MI_MODE(engine->mmio_base),
931 MODE_IDLE, MODE_IDLE,
932 1000)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000933 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
Oscar Mateo9832b9d2014-07-24 17:04:30 +0100934 return;
935 }
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000936 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
Oscar Mateo454afeb2014-07-24 17:04:22 +0100937}
938
John Harrison4866d722015-05-29 17:43:55 +0100939int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
Oscar Mateo48e29f52014-07-24 17:04:29 +0100940{
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000941 struct intel_engine_cs *engine = req->engine;
Oscar Mateo48e29f52014-07-24 17:04:29 +0100942 int ret;
943
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000944 if (!engine->gpu_caches_dirty)
Oscar Mateo48e29f52014-07-24 17:04:29 +0100945 return 0;
946
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000947 ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
Oscar Mateo48e29f52014-07-24 17:04:29 +0100948 if (ret)
949 return ret;
950
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000951 engine->gpu_caches_dirty = false;
Oscar Mateo48e29f52014-07-24 17:04:29 +0100952 return 0;
953}
954
Chris Wilsone2efd132016-05-24 14:53:34 +0100955static int intel_lr_context_pin(struct i915_gem_context *ctx,
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100956 struct intel_engine_cs *engine)
Oscar Mateodcb4c122014-11-13 10:28:10 +0000957{
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100958 struct drm_i915_private *dev_priv = ctx->i915;
Chris Wilson9021ad02016-05-24 14:53:37 +0100959 struct intel_context *ce = &ctx->engine[engine->id];
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +0100960 void *vaddr;
961 u32 *lrc_reg_state;
Tvrtko Ursulinca825802016-01-15 15:10:27 +0000962 int ret;
Oscar Mateodcb4c122014-11-13 10:28:10 +0000963
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100964 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
Tvrtko Ursulinca825802016-01-15 15:10:27 +0000965
Chris Wilson9021ad02016-05-24 14:53:37 +0100966 if (ce->pin_count++)
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100967 return 0;
968
Chris Wilson9021ad02016-05-24 14:53:37 +0100969 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
970 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
Nick Hoathe84fe802015-09-11 12:53:46 +0100971 if (ret)
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100972 goto err;
Thomas Daniel7ba717c2014-11-13 10:28:56 +0000973
Chris Wilson9021ad02016-05-24 14:53:37 +0100974 vaddr = i915_gem_object_pin_map(ce->state);
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +0100975 if (IS_ERR(vaddr)) {
976 ret = PTR_ERR(vaddr);
Tvrtko Ursulin82352e92016-01-15 17:12:45 +0000977 goto unpin_ctx_obj;
978 }
979
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +0100980 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
981
Chris Wilson9021ad02016-05-24 14:53:37 +0100982 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
Nick Hoathe84fe802015-09-11 12:53:46 +0100983 if (ret)
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +0100984 goto unpin_map;
Alex Daid1675192015-08-12 15:43:43 +0100985
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100986 i915_gem_context_reference(ctx);
Chris Wilson9021ad02016-05-24 14:53:37 +0100987 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +0000988 intel_lr_context_descriptor_update(ctx, engine);
Chris Wilson9021ad02016-05-24 14:53:37 +0100989
990 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
991 ce->lrc_reg_state = lrc_reg_state;
992 ce->state->dirty = true;
Daniel Vettere93c28f2015-09-02 14:33:42 +0200993
Nick Hoathe84fe802015-09-11 12:53:46 +0100994 /* Invalidate GuC TLB. */
995 if (i915.enable_guc_submission)
996 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
Oscar Mateodcb4c122014-11-13 10:28:10 +0000997
Chris Wilson24f1d3c2016-04-28 09:56:53 +0100998 return 0;
Thomas Daniel7ba717c2014-11-13 10:28:56 +0000999
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001000unpin_map:
Chris Wilson9021ad02016-05-24 14:53:37 +01001001 i915_gem_object_unpin_map(ce->state);
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001002unpin_ctx_obj:
Chris Wilson9021ad02016-05-24 14:53:37 +01001003 i915_gem_object_ggtt_unpin(ce->state);
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001004err:
Chris Wilson9021ad02016-05-24 14:53:37 +01001005 ce->pin_count = 0;
Thomas Daniel7ba717c2014-11-13 10:28:56 +00001006 return ret;
Oscar Mateodcb4c122014-11-13 10:28:10 +00001007}
1008
Chris Wilsone2efd132016-05-24 14:53:34 +01001009void intel_lr_context_unpin(struct i915_gem_context *ctx,
Tvrtko Ursuline52928232016-01-28 10:29:54 +00001010 struct intel_engine_cs *engine)
Oscar Mateodcb4c122014-11-13 10:28:10 +00001011{
Chris Wilson9021ad02016-05-24 14:53:37 +01001012 struct intel_context *ce = &ctx->engine[engine->id];
Daniel Vetteraf3302b2015-12-04 17:27:15 +01001013
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001014 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
Chris Wilson9021ad02016-05-24 14:53:37 +01001015 GEM_BUG_ON(ce->pin_count == 0);
Tvrtko Ursulin321fe302016-01-28 10:29:55 +00001016
Chris Wilson9021ad02016-05-24 14:53:37 +01001017 if (--ce->pin_count)
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001018 return;
1019
Chris Wilson9021ad02016-05-24 14:53:37 +01001020 intel_unpin_ringbuffer_obj(ce->ringbuf);
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001021
Chris Wilson9021ad02016-05-24 14:53:37 +01001022 i915_gem_object_unpin_map(ce->state);
1023 i915_gem_object_ggtt_unpin(ce->state);
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001024
Chris Wilson9021ad02016-05-24 14:53:37 +01001025 ce->lrc_vma = NULL;
1026 ce->lrc_desc = 0;
1027 ce->lrc_reg_state = NULL;
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001028
1029 i915_gem_context_unreference(ctx);
Oscar Mateodcb4c122014-11-13 10:28:10 +00001030}
1031
John Harrisone2be4fa2015-05-29 17:43:54 +01001032static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
Michel Thierry771b9a52014-11-11 16:47:33 +00001033{
1034 int ret, i;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001035 struct intel_engine_cs *engine = req->engine;
John Harrisone2be4fa2015-05-29 17:43:54 +01001036 struct intel_ringbuffer *ringbuf = req->ringbuf;
Chris Wilsonc0336662016-05-06 15:40:21 +01001037 struct i915_workarounds *w = &req->i915->workarounds;
Michel Thierry771b9a52014-11-11 16:47:33 +00001038
Boyer, Waynecd7feaa2016-01-06 17:15:29 -08001039 if (w->count == 0)
Michel Thierry771b9a52014-11-11 16:47:33 +00001040 return 0;
1041
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001042 engine->gpu_caches_dirty = true;
John Harrison4866d722015-05-29 17:43:55 +01001043 ret = logical_ring_flush_all_caches(req);
Michel Thierry771b9a52014-11-11 16:47:33 +00001044 if (ret)
1045 return ret;
1046
Chris Wilson987046a2016-04-28 09:56:46 +01001047 ret = intel_ring_begin(req, w->count * 2 + 2);
Michel Thierry771b9a52014-11-11 16:47:33 +00001048 if (ret)
1049 return ret;
1050
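	/* One LRI packet programs all w->count workaround register/value pairs. */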
1051 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1052 for (i = 0; i < w->count; i++) {
Ville Syrjäläf92a9162015-11-04 23:20:07 +02001053 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
Michel Thierry771b9a52014-11-11 16:47:33 +00001054 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1055 }
1056 intel_logical_ring_emit(ringbuf, MI_NOOP);
1057
1058 intel_logical_ring_advance(ringbuf);
1059
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001060 engine->gpu_caches_dirty = true;
John Harrison4866d722015-05-29 17:43:55 +01001061 ret = logical_ring_flush_all_caches(req);
Michel Thierry771b9a52014-11-11 16:47:33 +00001062 if (ret)
1063 return ret;
1064
1065 return 0;
1066}
1067
Arun Siluvery83b8a982015-07-08 10:27:05 +01001068#define wa_ctx_emit(batch, index, cmd) \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001069 do { \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001070 int __index = (index)++; \
1071 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001072 return -ENOSPC; \
1073 } \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001074 batch[__index] = (cmd); \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001075 } while (0)
1076
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001077#define wa_ctx_emit_reg(batch, index, reg) \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001078 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
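
/*
 * Illustrative usage of the helpers above (mirroring the emitters below):
 * each wa_ctx_emit() stores one dword into the workaround batch page and
 * post-increments 'index', failing with -ENOSPC if the page would overflow:
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 *	wa_ctx_emit(batch, index, l3sqc4_flush);
 */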
Arun Siluvery9e000842015-07-03 14:27:31 +01001079
1080/*
1081 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1082 * the PIPE_CONTROL instruction. This is required for the flush to happen correctly
1083 * but there is a slight complication as this is applied in a WA batch where the
1084 * values are only initialized once, so we cannot read the register value at the
1085 * beginning and reuse it further; hence we save its value to memory, upload a
1086 * constant value with bit21 set and then restore the register from the saved value.
1087 * To simplify the WA, a constant value is formed by using the default value
1088 * of this register. This shouldn't be a problem because we are only modifying
1089 * it for a short period and this batch is non-preemptible. We can of course
1090 * use additional instructions that read the actual value of the register
1091 * at that time and set our bit of interest but it makes the WA complicated.
1092 *
1093 * This WA is also required for Gen9, so it is extracted into a function to
1094 * avoid code duplication.
1095 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001096static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
Arun Siluvery9e000842015-07-03 14:27:31 +01001097 uint32_t *const batch,
1098 uint32_t index)
1099{
1100 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1101
Arun Siluverya4106a72015-07-14 15:01:29 +01001102 /*
Mika Kuoppalafe905812016-06-07 17:19:03 +03001103 * WaDisableLSQCROPERFforOCL:skl,kbl
Arun Siluverya4106a72015-07-14 15:01:29 +01001104 * This WA is implemented in skl_init_clock_gating() but since
1105 * this batch updates GEN8_L3SQCREG4 with default value we need to
1106 * set this bit here to retain the WA during flush.
1107 */
Mika Kuoppalafe905812016-06-07 17:19:03 +03001108 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1109 IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
Arun Siluverya4106a72015-07-14 15:01:29 +01001110 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1111
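	/* 1: save the current GEN8_L3SQCREG4 value to the scratch page (SRM) */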
Arun Siluveryf1afe242015-08-04 16:22:20 +01001112 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001113 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001114 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001115 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001116 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001117
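	/* 2: load GEN8_L3SQCREG4 with the coherent-lines flush bit set (LRI) */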
Arun Siluvery83b8a982015-07-08 10:27:05 +01001118 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001119 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001120 wa_ctx_emit(batch, index, l3sqc4_flush);
Arun Siluvery9e000842015-07-03 14:27:31 +01001121
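	/* 3: flush with a DC-flush PIPE_CONTROL while the bit is set */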
Arun Siluvery83b8a982015-07-08 10:27:05 +01001122 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1123 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1124 PIPE_CONTROL_DC_FLUSH_ENABLE));
1125 wa_ctx_emit(batch, index, 0);
1126 wa_ctx_emit(batch, index, 0);
1127 wa_ctx_emit(batch, index, 0);
1128 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001129
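	/* 4: restore the saved GEN8_L3SQCREG4 value from the scratch page (LRM) */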
Arun Siluveryf1afe242015-08-04 16:22:20 +01001130 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001131 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001132 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001133 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001134 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001135
1136 return index;
1137}
1138
Arun Siluvery17ee9502015-06-19 19:07:01 +01001139static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1140 uint32_t offset,
1141 uint32_t start_alignment)
1142{
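	/* Align the batch start and record it as this wa_ctx_bb's offset. */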
1143 return wa_ctx->offset = ALIGN(offset, start_alignment);
1144}
1145
1146static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1147 uint32_t offset,
1148 uint32_t size_alignment)
1149{
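	/* Everything emitted since wa_ctx_start() belongs to this batch. */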
1150 wa_ctx->size = offset - wa_ctx->offset;
1151
1152 WARN(wa_ctx->size % size_alignment,
1153 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1154 wa_ctx->size, size_alignment);
1155 return 0;
1156}
1157
1158/**
1159 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1160 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001161 * @engine: only applicable for RCS
Arun Siluvery17ee9502015-06-19 19:07:01 +01001162 * @wa_ctx: structure representing wa_ctx
1163 * offset: specifies start of the batch, should be cache-aligned. This is updated
1164 * with the offset value received as input.
1165 * size: size of the batch in DWORDS but HW expects in terms of cachelines
1166 * @batch: page in which WA are loaded
1167 * @offset: This field specifies the start of the batch, it should be
1168 * cache-aligned otherwise it is adjusted accordingly.
1169 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1170 * initialized at the beginning and shared across all contexts but this field
1171 * helps us to have multiple batches at different offsets and select them based
1172 * on some criteria. At the moment this batch always starts at the beginning of the page
1173 * and at this point we don't have multiple wa_ctx batch buffers.
1174 *
1175 * The number of WAs applied is not known at the beginning; we use this field
1176 * to return the number of DWORDS written.
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001177 *
Arun Siluvery17ee9502015-06-19 19:07:01 +01001178 * Note that this batch does not contain MI_BATCH_BUFFER_END,
1179 * so it adds NOOPs as padding to make it cacheline-aligned.
1180 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
1181 * makes a complete batch buffer.
1182 *
1183 * Return: non-zero if we exceed the PAGE_SIZE limit.
1184 */
1185
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001186static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001187 struct i915_wa_ctx_bb *wa_ctx,
1188 uint32_t *const batch,
1189 uint32_t *offset)
1190{
Arun Siluvery0160f052015-06-23 15:46:57 +01001191 uint32_t scratch_addr;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001192 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1193
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001194 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001195 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001196
Arun Siluveryc82435b2015-06-19 18:37:13 +01001197 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001198 if (IS_BROADWELL(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001199 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Andrzej Hajda604ef732015-09-21 15:33:35 +02001200 if (rc < 0)
1201 return rc;
1202 index = rc;
Arun Siluveryc82435b2015-06-19 18:37:13 +01001203 }
1204
Arun Siluvery0160f052015-06-23 15:46:57 +01001205 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1206 /* Actual scratch location is at a 128 byte offset */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001207 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
Arun Siluvery0160f052015-06-23 15:46:57 +01001208
Arun Siluvery83b8a982015-07-08 10:27:05 +01001209 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1210 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1211 PIPE_CONTROL_GLOBAL_GTT_IVB |
1212 PIPE_CONTROL_CS_STALL |
1213 PIPE_CONTROL_QW_WRITE));
1214 wa_ctx_emit(batch, index, scratch_addr);
1215 wa_ctx_emit(batch, index, 0);
1216 wa_ctx_emit(batch, index, 0);
1217 wa_ctx_emit(batch, index, 0);
Arun Siluvery0160f052015-06-23 15:46:57 +01001218
Arun Siluvery17ee9502015-06-19 19:07:01 +01001219 /* Pad to end of cacheline */
1220 while (index % CACHELINE_DWORDS)
Arun Siluvery83b8a982015-07-08 10:27:05 +01001221 wa_ctx_emit(batch, index, MI_NOOP);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001222
1223 /*
1224 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1225 * execution depends on the length specified in terms of cache lines
1226 * in the register CTX_RCS_INDIRECT_CTX
1227 */
1228
1229 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1230}
1231
1232/**
1233 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1234 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001235 * @engine: only applicable for RCS
Arun Siluvery17ee9502015-06-19 19:07:01 +01001236 * @wa_ctx: structure representing wa_ctx
1237 * offset: specifies start of the batch, should be cache-aligned.
1238 * size: size of the batch in DWORDS but HW expects in terms of cachelines
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001239 * @batch: page in which WA are loaded
Arun Siluvery17ee9502015-06-19 19:07:01 +01001240 * @offset: This field specifies the start of this batch.
1241 * This batch is started immediately after indirect_ctx batch. Since we ensure
1242 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
1243 *
1244 * The number of DWORDS written is returned using this field.
1245 *
1246 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1247 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1248 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001249static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001250 struct i915_wa_ctx_bb *wa_ctx,
1251 uint32_t *const batch,
1252 uint32_t *offset)
1253{
1254 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1255
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001256 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001257 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001258
Arun Siluvery83b8a982015-07-08 10:27:05 +01001259 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001260
1261 return wa_ctx_end(wa_ctx, *offset = index, 1);
1262}
1263
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001264static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001265 struct i915_wa_ctx_bb *wa_ctx,
1266 uint32_t *const batch,
1267 uint32_t *offset)
1268{
Arun Siluverya4106a72015-07-14 15:01:29 +01001269 int ret;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001270 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1271
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001272 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001273 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1274 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001275 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery0504cff2015-07-14 15:01:27 +01001276
Arun Siluverya4106a72015-07-14 15:01:29 +01001277 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001278 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Arun Siluverya4106a72015-07-14 15:01:29 +01001279 if (ret < 0)
1280 return ret;
1281 index = ret;
1282
Mika Kuoppala066d4622016-06-07 17:19:15 +03001283 /* WaClearSlmSpaceAtContextSwitch:kbl */
1284 /* Actual scratch location is at a 128 byte offset */
1285 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1286 uint32_t scratch_addr
1287 = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1288
1289 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1290 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1291 PIPE_CONTROL_GLOBAL_GTT_IVB |
1292 PIPE_CONTROL_CS_STALL |
1293 PIPE_CONTROL_QW_WRITE));
1294 wa_ctx_emit(batch, index, scratch_addr);
1295 wa_ctx_emit(batch, index, 0);
1296 wa_ctx_emit(batch, index, 0);
1297 wa_ctx_emit(batch, index, 0);
1298 }
Arun Siluvery0504cff2015-07-14 15:01:27 +01001299 /* Pad to end of cacheline */
1300 while (index % CACHELINE_DWORDS)
1301 wa_ctx_emit(batch, index, MI_NOOP);
1302
1303 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1304}
1305
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001306static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001307 struct i915_wa_ctx_bb *wa_ctx,
1308 uint32_t *const batch,
1309 uint32_t *offset)
1310{
1311 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1312
Arun Siluvery9b014352015-07-14 15:01:30 +01001313 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001314 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1315 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Arun Siluvery9b014352015-07-14 15:01:30 +01001316 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001317 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
Arun Siluvery9b014352015-07-14 15:01:30 +01001318 wa_ctx_emit(batch, index,
1319 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1320 wa_ctx_emit(batch, index, MI_NOOP);
1321 }
1322
Tim Goreb1e429f2016-03-21 14:37:29 +00001323 /* WaClearTdlStateAckDirtyBits:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001324 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
Tim Goreb1e429f2016-03-21 14:37:29 +00001325 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1326
1327 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1328 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1329
1330 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1331 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1332
1333 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1334 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1335
1336 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1337 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1338 wa_ctx_emit(batch, index, 0x0);
1339 wa_ctx_emit(batch, index, MI_NOOP);
1340 }
1341
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001342 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001343 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1344 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001345 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1346
Arun Siluvery0504cff2015-07-14 15:01:27 +01001347 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1348
1349 return wa_ctx_end(wa_ctx, *offset = index, 1);
1350}
1351
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001352static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001353{
1354 int ret;
1355
Chris Wilsonc0336662016-05-06 15:40:21 +01001356 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001357 PAGE_ALIGN(size));
Chris Wilsonfe3db792016-04-25 13:32:13 +01001358 if (IS_ERR(engine->wa_ctx.obj)) {
Arun Siluvery17ee9502015-06-19 19:07:01 +01001359 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01001360 ret = PTR_ERR(engine->wa_ctx.obj);
1361 engine->wa_ctx.obj = NULL;
1362 return ret;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001363 }
1364
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001365 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001366 if (ret) {
1367 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1368 ret);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001369 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001370 return ret;
1371 }
1372
1373 return 0;
1374}
1375
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001376static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001377{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001378 if (engine->wa_ctx.obj) {
1379 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1380 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1381 engine->wa_ctx.obj = NULL;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001382 }
1383}
1384
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001385static int intel_init_workaround_bb(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001386{
1387 int ret;
1388 uint32_t *batch;
1389 uint32_t offset;
1390 struct page *page;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001391 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001392
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001393 WARN_ON(engine->id != RCS);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001394
Arun Siluvery5e60d792015-06-23 15:50:44 +01001395	/* update this when WAs for higher Gens are added */
Chris Wilsonc0336662016-05-06 15:40:21 +01001396 if (INTEL_GEN(engine->i915) > 9) {
Arun Siluvery0504cff2015-07-14 15:01:27 +01001397 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
Chris Wilsonc0336662016-05-06 15:40:21 +01001398 INTEL_GEN(engine->i915));
Arun Siluvery5e60d792015-06-23 15:50:44 +01001399 return 0;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001400 }
Arun Siluvery5e60d792015-06-23 15:50:44 +01001401
Arun Siluveryc4db7592015-06-19 18:37:11 +01001402	/* some WAs perform writes to the scratch page; ensure it is valid */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001403 if (engine->scratch.obj == NULL) {
1404 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
Arun Siluveryc4db7592015-06-19 18:37:11 +01001405 return -EINVAL;
1406 }
1407
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001408 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001409 if (ret) {
1410 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1411 return ret;
1412 }
1413
Dave Gordon033908a2015-12-10 18:51:23 +00001414 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001415 batch = kmap_atomic(page);
1416 offset = 0;
1417
Chris Wilsonc0336662016-05-06 15:40:21 +01001418 if (IS_GEN8(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001419 ret = gen8_init_indirectctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001420 &wa_ctx->indirect_ctx,
1421 batch,
1422 &offset);
1423 if (ret)
1424 goto out;
1425
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001426 ret = gen8_init_perctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001427 &wa_ctx->per_ctx,
1428 batch,
1429 &offset);
1430 if (ret)
1431 goto out;
Chris Wilsonc0336662016-05-06 15:40:21 +01001432 } else if (IS_GEN9(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001433 ret = gen9_init_indirectctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001434 &wa_ctx->indirect_ctx,
1435 batch,
1436 &offset);
1437 if (ret)
1438 goto out;
1439
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001440 ret = gen9_init_perctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001441 &wa_ctx->per_ctx,
1442 batch,
1443 &offset);
1444 if (ret)
1445 goto out;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001446 }
1447
1448out:
1449 kunmap_atomic(batch);
1450 if (ret)
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001451 lrc_destroy_wa_ctx_obj(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001452
1453 return ret;
1454}
1455
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001456static void lrc_init_hws(struct intel_engine_cs *engine)
1457{
Chris Wilsonc0336662016-05-06 15:40:21 +01001458 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001459
1460 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1461 (u32)engine->status_page.gfx_addr);
1462 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1463}
1464
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001465static int gen8_init_common_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001466{
Chris Wilsonc0336662016-05-06 15:40:21 +01001467 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +00001468 unsigned int next_context_status_buffer_hw;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001469
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001470 lrc_init_hws(engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01001471
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001472 I915_WRITE_IMR(engine,
1473 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1474 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001475
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001476 I915_WRITE(RING_MODE_GEN7(engine),
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001477 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1478 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001479 POSTING_READ(RING_MODE_GEN7(engine));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001480
1481 /*
1482 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1483 * zero, we need to read the write pointer from hardware and use its
1484 * value because "this register is power context save restored".
1485 * Effectively, these states have been observed:
1486 *
1487 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1488 * BDW | CSB regs not reset | CSB regs reset |
1489 * CHT | CSB regs not reset | CSB regs not reset |
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001490 * SKL | ? | ? |
1491 * BXT | ? | ? |
Michel Thierrydfc53c52015-09-28 13:25:12 +01001492 */
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001493 next_context_status_buffer_hw =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001494 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001495
1496 /*
1497 * When the CSB registers are reset (also after power-up / gpu reset),
1498 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1499 * this special case, so the first element read is CSB[0].
1500 */
1501 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1502 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1503
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001504 engine->next_context_status_buffer = next_context_status_buffer_hw;
1505 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001506
Tomas Elffc0768c2016-03-21 16:26:59 +00001507 intel_engine_init_hangcheck(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001508
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001509 return intel_mocs_init_engine(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001510}
1511
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001512static int gen8_init_render_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001513{
Chris Wilsonc0336662016-05-06 15:40:21 +01001514 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001515 int ret;
1516
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001517 ret = gen8_init_common_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001518 if (ret)
1519 return ret;
1520
1521 /* We need to disable the AsyncFlip performance optimisations in order
1522 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1523 * programmed to '1' on all products.
1524 *
1525 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1526 */
1527 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1528
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001529 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1530
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001531 return init_workarounds_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001532}
1533
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001534static int gen9_init_render_ring(struct intel_engine_cs *engine)
Damien Lespiau82ef8222015-02-09 19:33:08 +00001535{
1536 int ret;
1537
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001538 ret = gen8_init_common_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001539 if (ret)
1540 return ret;
1541
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001542 return init_workarounds_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001543}
1544
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001545static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1546{
1547 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001548 struct intel_engine_cs *engine = req->engine;
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001549 struct intel_ringbuffer *ringbuf = req->ringbuf;
1550 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1551 int i, ret;
1552
Chris Wilson987046a2016-04-28 09:56:46 +01001553 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001554 if (ret)
1555 return ret;
1556
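	/* Reload all PDP register pairs (UDW then LDW), highest index first. */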
1557 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1558 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1559 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1560
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001561 intel_logical_ring_emit_reg(ringbuf,
1562 GEN8_RING_PDP_UDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001563 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001564 intel_logical_ring_emit_reg(ringbuf,
1565 GEN8_RING_PDP_LDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001566 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1567 }
1568
1569 intel_logical_ring_emit(ringbuf, MI_NOOP);
1570 intel_logical_ring_advance(ringbuf);
1571
1572 return 0;
1573}
1574
John Harrisonbe795fc2015-05-29 17:44:03 +01001575static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
John Harrison8e004ef2015-02-13 11:48:10 +00001576 u64 offset, unsigned dispatch_flags)
Oscar Mateo15648582014-07-24 17:04:32 +01001577{
John Harrisonbe795fc2015-05-29 17:44:03 +01001578 struct intel_ringbuffer *ringbuf = req->ringbuf;
John Harrison8e004ef2015-02-13 11:48:10 +00001579 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
Oscar Mateo15648582014-07-24 17:04:32 +01001580 int ret;
1581
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001582	/* Don't rely on hw updating PDPs, especially in lite-restore.
1583 * Ideally, we should set Force PD Restore in ctx descriptor,
1584 * but we can't. Force Restore would be a second option, but
1585 * it is unsafe in case of lite-restore (because the ctx is
Michel Thierry2dba3232015-07-30 11:06:23 +01001586 * not idle). PML4 is allocated during ppgtt init so this is
1587 * not needed in 48-bit.*/
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001588 if (req->ctx->ppgtt &&
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001589 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001590 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
Chris Wilsonc0336662016-05-06 15:40:21 +01001591 !intel_vgpu_active(req->i915)) {
Michel Thierry2dba3232015-07-30 11:06:23 +01001592 ret = intel_logical_ring_emit_pdps(req);
1593 if (ret)
1594 return ret;
1595 }
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001596
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001597 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001598 }
1599
Chris Wilson987046a2016-04-28 09:56:46 +01001600 ret = intel_ring_begin(req, 4);
Oscar Mateo15648582014-07-24 17:04:32 +01001601 if (ret)
1602 return ret;
1603
1604 /* FIXME(BDW): Address space and security selectors. */
Abdiel Janulgue69225282015-06-16 13:39:42 +03001605 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1606 (ppgtt<<8) |
1607 (dispatch_flags & I915_DISPATCH_RS ?
1608 MI_BATCH_RESOURCE_STREAMER : 0));
Oscar Mateo15648582014-07-24 17:04:32 +01001609 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1610 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1611 intel_logical_ring_emit(ringbuf, MI_NOOP);
1612 intel_logical_ring_advance(ringbuf);
1613
1614 return 0;
1615}
1616
Chris Wilson31bb59c2016-07-01 17:23:27 +01001617static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001618{
Chris Wilsonc0336662016-05-06 15:40:21 +01001619 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001620 I915_WRITE_IMR(engine,
1621 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1622 POSTING_READ_FW(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001623}
1624
Chris Wilson31bb59c2016-07-01 17:23:27 +01001625static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001626{
Chris Wilsonc0336662016-05-06 15:40:21 +01001627 struct drm_i915_private *dev_priv = engine->i915;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001628 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001629}
1630
John Harrison7deb4d32015-05-29 17:43:59 +01001631static int gen8_emit_flush(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001632 u32 invalidate_domains,
1633 u32 unused)
1634{
John Harrison7deb4d32015-05-29 17:43:59 +01001635 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001636 struct intel_engine_cs *engine = ringbuf->engine;
Chris Wilsonc0336662016-05-06 15:40:21 +01001637 struct drm_i915_private *dev_priv = request->i915;
Oscar Mateo47122742014-07-24 17:04:28 +01001638 uint32_t cmd;
1639 int ret;
1640
Chris Wilson987046a2016-04-28 09:56:46 +01001641 ret = intel_ring_begin(request, 4);
Oscar Mateo47122742014-07-24 17:04:28 +01001642 if (ret)
1643 return ret;
1644
1645 cmd = MI_FLUSH_DW + 1;
1646
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001647 /* We always require a command barrier so that subsequent
1648 * commands, such as breadcrumb interrupts, are strictly ordered
1649 * wrt the contents of the write cache being flushed to memory
1650 * (and thus being coherent from the CPU).
1651 */
1652 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1653
1654 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1655 cmd |= MI_INVALIDATE_TLB;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001656 if (engine == &dev_priv->engine[VCS])
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001657 cmd |= MI_INVALIDATE_BSD;
Oscar Mateo47122742014-07-24 17:04:28 +01001658 }
1659
1660 intel_logical_ring_emit(ringbuf, cmd);
1661 intel_logical_ring_emit(ringbuf,
1662 I915_GEM_HWS_SCRATCH_ADDR |
1663 MI_FLUSH_DW_USE_GTT);
1664 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1665 intel_logical_ring_emit(ringbuf, 0); /* value */
1666 intel_logical_ring_advance(ringbuf);
1667
1668 return 0;
1669}
1670
John Harrison7deb4d32015-05-29 17:43:59 +01001671static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001672 u32 invalidate_domains,
1673 u32 flush_domains)
1674{
John Harrison7deb4d32015-05-29 17:43:59 +01001675 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001676 struct intel_engine_cs *engine = ringbuf->engine;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001677 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001678 bool vf_flush_wa = false, dc_flush_wa = false;
Oscar Mateo47122742014-07-24 17:04:28 +01001679 u32 flags = 0;
1680 int ret;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001681 int len;
Oscar Mateo47122742014-07-24 17:04:28 +01001682
1683 flags |= PIPE_CONTROL_CS_STALL;
1684
1685 if (flush_domains) {
1686 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1687 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
Francisco Jerez965fd602016-01-13 18:59:39 -08001688 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
Chris Wilson40a24482015-08-21 16:08:41 +01001689 flags |= PIPE_CONTROL_FLUSH_ENABLE;
Oscar Mateo47122742014-07-24 17:04:28 +01001690 }
1691
1692 if (invalidate_domains) {
1693 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1694 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1695 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1696 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1697 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1698 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1699 flags |= PIPE_CONTROL_QW_WRITE;
1700 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
Oscar Mateo47122742014-07-24 17:04:28 +01001701
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001702 /*
1703 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1704 * pipe control.
1705 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001706 if (IS_GEN9(request->i915))
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001707 vf_flush_wa = true;
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001708
1709 /* WaForGAMHang:kbl */
1710 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1711 dc_flush_wa = true;
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001712 }
Imre Deak9647ff32015-01-25 13:27:11 -08001713
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001714 len = 6;
1715
1716 if (vf_flush_wa)
1717 len += 6;
1718
1719 if (dc_flush_wa)
1720 len += 12;
1721
1722 ret = intel_ring_begin(request, len);
Oscar Mateo47122742014-07-24 17:04:28 +01001723 if (ret)
1724 return ret;
1725
Imre Deak9647ff32015-01-25 13:27:11 -08001726 if (vf_flush_wa) {
1727 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1728 intel_logical_ring_emit(ringbuf, 0);
1729 intel_logical_ring_emit(ringbuf, 0);
1730 intel_logical_ring_emit(ringbuf, 0);
1731 intel_logical_ring_emit(ringbuf, 0);
1732 intel_logical_ring_emit(ringbuf, 0);
1733 }
1734
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001735 if (dc_flush_wa) {
1736 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1737 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
1738 intel_logical_ring_emit(ringbuf, 0);
1739 intel_logical_ring_emit(ringbuf, 0);
1740 intel_logical_ring_emit(ringbuf, 0);
1741 intel_logical_ring_emit(ringbuf, 0);
1742 }
1743
Oscar Mateo47122742014-07-24 17:04:28 +01001744 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1745 intel_logical_ring_emit(ringbuf, flags);
1746 intel_logical_ring_emit(ringbuf, scratch_addr);
1747 intel_logical_ring_emit(ringbuf, 0);
1748 intel_logical_ring_emit(ringbuf, 0);
1749 intel_logical_ring_emit(ringbuf, 0);
Mika Kuoppala0b2d0932016-06-07 17:19:10 +03001750
1751 if (dc_flush_wa) {
1752 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1753 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
1754 intel_logical_ring_emit(ringbuf, 0);
1755 intel_logical_ring_emit(ringbuf, 0);
1756 intel_logical_ring_emit(ringbuf, 0);
1757 intel_logical_ring_emit(ringbuf, 0);
1758 }
1759
Oscar Mateo47122742014-07-24 17:04:28 +01001760 intel_logical_ring_advance(ringbuf);
1761
1762 return 0;
1763}
1764
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001765static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
Imre Deak319404d2015-08-14 18:35:27 +03001766{
Imre Deak319404d2015-08-14 18:35:27 +03001767 /*
1768 * On BXT A steppings there is a HW coherency issue whereby the
1769 * MI_STORE_DATA_IMM storing the completed request's seqno
1770 * occasionally doesn't invalidate the CPU cache. Work around this by
1771 * clflushing the corresponding cacheline whenever the caller wants
1772 * the coherency to be guaranteed. Note that this cacheline is known
1773 * to be clean at this point, since we only write it in
1774 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1775 * this clflush in practice becomes an invalidate operation.
1776 */
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001777 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001778}
1779
Chris Wilson7c17d372016-01-20 15:43:35 +02001780/*
1781 * Reserve space for 2 NOOPs at the end of each request to be
1782 * used as a workaround for not being allowed to do lite
1783 * restore with HEAD==TAIL (WaIdleLiteRestore).
1784 */
1785#define WA_TAIL_DWORDS 2
1786
John Harrisonc4e76632015-05-29 17:44:01 +01001787static int gen8_emit_request(struct drm_i915_gem_request *request)
Oscar Mateo4da46e12014-07-24 17:04:27 +01001788{
John Harrisonc4e76632015-05-29 17:44:01 +01001789 struct intel_ringbuffer *ringbuf = request->ringbuf;
Oscar Mateo4da46e12014-07-24 17:04:27 +01001790 int ret;
1791
Chris Wilson987046a2016-04-28 09:56:46 +01001792 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001793 if (ret)
1794 return ret;
1795
Chris Wilson7c17d372016-01-20 15:43:35 +02001796 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1797 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001798
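	/*
	 * Post the seqno to the HWS page with a flushing store-dword write,
	 * then raise a user interrupt to signal completion.
	 */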
Oscar Mateo4da46e12014-07-24 17:04:27 +01001799 intel_logical_ring_emit(ringbuf,
Chris Wilson7c17d372016-01-20 15:43:35 +02001800 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1801 intel_logical_ring_emit(ringbuf,
Chris Wilsona58c01a2016-04-29 13:18:21 +01001802 intel_hws_seqno_address(request->engine) |
Chris Wilson7c17d372016-01-20 15:43:35 +02001803 MI_FLUSH_DW_USE_GTT);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001804 intel_logical_ring_emit(ringbuf, 0);
Chris Wilson1b7744e2016-07-01 17:23:17 +01001805 intel_logical_ring_emit(ringbuf, request->seqno);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001806 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1807 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001808 return intel_logical_ring_advance_and_submit(request);
1809}
Oscar Mateo4da46e12014-07-24 17:04:27 +01001810
Chris Wilson7c17d372016-01-20 15:43:35 +02001811static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1812{
1813 struct intel_ringbuffer *ringbuf = request->ringbuf;
1814 int ret;
1815
Chris Wilson987046a2016-04-28 09:56:46 +01001816 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
Chris Wilson7c17d372016-01-20 15:43:35 +02001817 if (ret)
1818 return ret;
1819
Michał Winiarskice81a652016-04-12 15:51:55 +02001820 /* We're using qword write, seqno should be aligned to 8 bytes. */
1821 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1822
Chris Wilson7c17d372016-01-20 15:43:35 +02001823	/* w/a: for post-sync ops following a GPGPU operation we
1824 * need a prior CS_STALL, which is emitted by the flush
1825 * following the batch.
Michel Thierry53292cd2015-04-15 18:11:33 +01001826 */
Michał Winiarskice81a652016-04-12 15:51:55 +02001827 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
Chris Wilson7c17d372016-01-20 15:43:35 +02001828 intel_logical_ring_emit(ringbuf,
1829 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1830 PIPE_CONTROL_CS_STALL |
1831 PIPE_CONTROL_QW_WRITE));
Chris Wilsona58c01a2016-04-29 13:18:21 +01001832 intel_logical_ring_emit(ringbuf,
1833 intel_hws_seqno_address(request->engine));
Chris Wilson7c17d372016-01-20 15:43:35 +02001834 intel_logical_ring_emit(ringbuf, 0);
1835 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
Michał Winiarskice81a652016-04-12 15:51:55 +02001836	/* We're trashing one dword of HWS. */
1837 intel_logical_ring_emit(ringbuf, 0);
Chris Wilson7c17d372016-01-20 15:43:35 +02001838 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
Michał Winiarskice81a652016-04-12 15:51:55 +02001839 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001840 return intel_logical_ring_advance_and_submit(request);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001841}
1842
John Harrisonbe013632015-05-29 17:43:45 +01001843static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
Damien Lespiaucef437a2015-02-10 19:32:19 +00001844{
Damien Lespiaucef437a2015-02-10 19:32:19 +00001845 struct render_state so;
Damien Lespiaucef437a2015-02-10 19:32:19 +00001846 int ret;
1847
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001848 ret = i915_gem_render_state_prepare(req->engine, &so);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001849 if (ret)
1850 return ret;
1851
1852 if (so.rodata == NULL)
1853 return 0;
1854
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001855 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
John Harrisonbe013632015-05-29 17:43:45 +01001856 I915_DISPATCH_SECURE);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001857 if (ret)
1858 goto out;
1859
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001860 ret = req->engine->emit_bb_start(req,
Arun Siluvery84e81022015-07-20 10:46:10 +01001861 (so.ggtt_offset + so.aux_batch_offset),
1862 I915_DISPATCH_SECURE);
1863 if (ret)
1864 goto out;
1865
John Harrisonb2af0372015-05-29 17:43:50 +01001866 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001867
Damien Lespiaucef437a2015-02-10 19:32:19 +00001868out:
1869 i915_gem_render_state_fini(&so);
1870 return ret;
1871}
1872
John Harrison87531812015-05-29 17:43:44 +01001873static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
Thomas Daniele7778be2014-12-02 12:50:48 +00001874{
1875 int ret;
1876
John Harrisone2be4fa2015-05-29 17:43:54 +01001877 ret = intel_logical_ring_workarounds_emit(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001878 if (ret)
1879 return ret;
1880
Peter Antoine3bbaba02015-07-10 20:13:11 +03001881 ret = intel_rcs_context_init_mocs(req);
1882 /*
1883 * Failing to program the MOCS is non-fatal. The system will not
1884 * run at peak performance. So generate an error and carry on.
1885 */
1886 if (ret)
1887 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1888
John Harrisonbe013632015-05-29 17:43:45 +01001889 return intel_lr_context_render_state_init(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001890}
1891
Oscar Mateo73e4d072014-07-24 17:04:48 +01001892/**
1893 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1894 *
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01001895 * @engine: Engine Command Streamer.
Oscar Mateo73e4d072014-07-24 17:04:48 +01001896 *
1897 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001898void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
Oscar Mateo454afeb2014-07-24 17:04:22 +01001899{
John Harrison6402c332014-10-31 12:00:26 +00001900 struct drm_i915_private *dev_priv;
Oscar Mateo9832b9d2014-07-24 17:04:30 +01001901
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00001902 if (!intel_engine_initialized(engine))
Oscar Mateo48d82382014-07-24 17:04:23 +01001903 return;
1904
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01001905 /*
1906 * Tasklet cannot be active at this point due to intel_mark_active/idle
1907 * so this is just for documentation.
1908 */
1909 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1910 tasklet_kill(&engine->irq_tasklet);
1911
Chris Wilsonc0336662016-05-06 15:40:21 +01001912 dev_priv = engine->i915;
John Harrison6402c332014-10-31 12:00:26 +00001913
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001914 if (engine->buffer) {
1915 intel_logical_ring_stop(engine);
1916 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
Dave Gordonb0366a52015-12-08 15:02:36 +00001917 }
Oscar Mateo48d82382014-07-24 17:04:23 +01001918
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001919 if (engine->cleanup)
1920 engine->cleanup(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01001921
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001922 i915_cmd_parser_fini_ring(engine);
1923 i915_gem_batch_pool_fini(&engine->batch_pool);
Oscar Mateo48d82382014-07-24 17:04:23 +01001924
Chris Wilson688e6c72016-07-01 17:23:15 +01001925 intel_engine_fini_breadcrumbs(engine);
1926
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001927 if (engine->status_page.obj) {
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001928 i915_gem_object_unpin_map(engine->status_page.obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001929 engine->status_page.obj = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01001930 }
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001931 intel_lr_context_unpin(dev_priv->kernel_context, engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001932
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001933 engine->idle_lite_restore_wa = 0;
1934 engine->disable_lite_restore_wa = false;
1935 engine->ctx_desc_template = 0;
Tvrtko Ursulinca825802016-01-15 15:10:27 +00001936
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001937 lrc_destroy_wa_ctx_obj(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01001938 engine->i915 = NULL;
Oscar Mateo454afeb2014-07-24 17:04:22 +01001939}
1940
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001941static void
Chris Wilsone1382ef2016-05-06 15:40:20 +01001942logical_ring_default_vfuncs(struct intel_engine_cs *engine)
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001943{
1944 /* Default vfuncs which can be overridden by each engine. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001945 engine->init_hw = gen8_init_common_ring;
1946 engine->emit_request = gen8_emit_request;
1947 engine->emit_flush = gen8_emit_flush;
Chris Wilson31bb59c2016-07-01 17:23:27 +01001948 engine->irq_enable = gen8_logical_ring_enable_irq;
1949 engine->irq_disable = gen8_logical_ring_disable_irq;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001950 engine->emit_bb_start = gen8_emit_bb_start;
Chris Wilson1b7744e2016-07-01 17:23:17 +01001951 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001952 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001953}
1954
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001955static inline void
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001956logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001957{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001958 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1959 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001960}
1961
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001962static int
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001963lrc_setup_hws(struct intel_engine_cs *engine,
1964 struct drm_i915_gem_object *dctx_obj)
1965{
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001966 void *hws;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001967
1968 /* The HWSP is part of the default context object in LRC mode. */
1969 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1970 LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001971 hws = i915_gem_object_pin_map(dctx_obj);
1972 if (IS_ERR(hws))
1973 return PTR_ERR(hws);
1974 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001975 engine->status_page.obj = dctx_obj;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001976
1977 return 0;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001978}
1979
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01001980static int
1981logical_ring_init(struct intel_engine_cs *engine)
1982{
1983 struct i915_gem_context *dctx = engine->i915->kernel_context;
1984 int ret;
1985
Chris Wilson688e6c72016-07-01 17:23:15 +01001986 ret = intel_engine_init_breadcrumbs(engine);
1987 if (ret)
1988 goto error;
1989
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01001990 ret = i915_cmd_parser_init_ring(engine);
1991 if (ret)
1992 goto error;
1993
1994 ret = execlists_context_deferred_alloc(dctx, engine);
1995 if (ret)
1996 goto error;
1997
1998 /* As this is the default context, always pin it */
1999 ret = intel_lr_context_pin(dctx, engine);
2000 if (ret) {
2001 DRM_ERROR("Failed to pin context for %s: %d\n",
2002 engine->name, ret);
2003 goto error;
2004 }
2005
2006 /* And setup the hardware status page. */
2007 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2008 if (ret) {
2009 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2010 goto error;
2011 }
2012
2013 return 0;
2014
2015error:
2016 intel_logical_ring_cleanup(engine);
2017 return ret;
2018}
2019
2020static int logical_render_ring_init(struct intel_engine_cs *engine)
2021{
2022 struct drm_i915_private *dev_priv = engine->i915;
2023 int ret;
2024
2025 if (HAS_L3_DPF(dev_priv))
2026 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2027
2028 /* Override some for render ring. */
2029 if (INTEL_GEN(dev_priv) >= 9)
2030 engine->init_hw = gen9_init_render_ring;
2031 else
2032 engine->init_hw = gen8_init_render_ring;
2033 engine->init_context = gen8_init_rcs_context;
2034 engine->cleanup = intel_fini_pipe_control;
2035 engine->emit_flush = gen8_emit_flush_render;
2036 engine->emit_request = gen8_emit_request_render;
2037
Chris Wilson7d5ea802016-07-01 17:23:20 +01002038 ret = intel_init_pipe_control(engine, 4096);
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002039 if (ret)
2040 return ret;
2041
2042 ret = intel_init_workaround_bb(engine);
2043 if (ret) {
2044 /*
2045 * We continue even if we fail to initialize WA batch
2046 * because we only expect rare glitches but nothing
2047 * critical to prevent us from using GPU
2048 */
2049 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2050 ret);
2051 }
2052
2053 ret = logical_ring_init(engine);
2054 if (ret) {
2055 lrc_destroy_wa_ctx_obj(engine);
2056 }
2057
2058 return ret;
2059}
2060
Chris Wilsone1382ef2016-05-06 15:40:20 +01002061static const struct logical_ring_info {
2062 const char *name;
2063 unsigned exec_id;
2064 unsigned guc_id;
2065 u32 mmio_base;
2066 unsigned irq_shift;
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002067 int (*init)(struct intel_engine_cs *engine);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002068} logical_rings[] = {
2069 [RCS] = {
2070 .name = "render ring",
2071 .exec_id = I915_EXEC_RENDER,
2072 .guc_id = GUC_RENDER_ENGINE,
2073 .mmio_base = RENDER_RING_BASE,
2074 .irq_shift = GEN8_RCS_IRQ_SHIFT,
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002075 .init = logical_render_ring_init,
Chris Wilsone1382ef2016-05-06 15:40:20 +01002076 },
2077 [BCS] = {
2078 .name = "blitter ring",
2079 .exec_id = I915_EXEC_BLT,
2080 .guc_id = GUC_BLITTER_ENGINE,
2081 .mmio_base = BLT_RING_BASE,
2082 .irq_shift = GEN8_BCS_IRQ_SHIFT,
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002083 .init = logical_ring_init,
Chris Wilsone1382ef2016-05-06 15:40:20 +01002084 },
2085 [VCS] = {
2086 .name = "bsd ring",
2087 .exec_id = I915_EXEC_BSD,
2088 .guc_id = GUC_VIDEO_ENGINE,
2089 .mmio_base = GEN6_BSD_RING_BASE,
2090 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002091 .init = logical_ring_init,
Chris Wilsone1382ef2016-05-06 15:40:20 +01002092 },
2093 [VCS2] = {
2094 .name = "bsd2 ring",
2095 .exec_id = I915_EXEC_BSD,
2096 .guc_id = GUC_VIDEO_ENGINE2,
2097 .mmio_base = GEN8_BSD2_RING_BASE,
2098 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002099 .init = logical_ring_init,
Chris Wilsone1382ef2016-05-06 15:40:20 +01002100 },
2101 [VECS] = {
2102 .name = "video enhancement ring",
2103 .exec_id = I915_EXEC_VEBOX,
2104 .guc_id = GUC_VIDEOENHANCE_ENGINE,
2105 .mmio_base = VEBOX_RING_BASE,
2106 .irq_shift = GEN8_VECS_IRQ_SHIFT,
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002107 .init = logical_ring_init,
Chris Wilsone1382ef2016-05-06 15:40:20 +01002108 },
2109};
2110
2111static struct intel_engine_cs *
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002112logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
Oscar Mateo454afeb2014-07-24 17:04:22 +01002113{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002114 const struct logical_ring_info *info = &logical_rings[id];
Chris Wilsone1382ef2016-05-06 15:40:20 +01002115 struct intel_engine_cs *engine = &dev_priv->engine[id];
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002116 enum forcewake_domains fw_domains;
Chris Wilsone1382ef2016-05-06 15:40:20 +01002117
2118 engine->id = id;
2119 engine->name = info->name;
2120 engine->exec_id = info->exec_id;
2121 engine->guc_id = info->guc_id;
2122 engine->mmio_base = info->mmio_base;
2123
Chris Wilsonc0336662016-05-06 15:40:21 +01002124 engine->i915 = dev_priv;
Oscar Mateo48d82382014-07-24 17:04:23 +01002125
2126 /* Intentionally left blank. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002127 engine->buffer = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01002128
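	/*
	 * Cache the forcewake domains needed to touch the ELSP and CSB
	 * registers during submission.
	 */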
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002129 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2130 RING_ELSP(engine),
2131 FW_REG_WRITE);
2132
2133 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2134 RING_CONTEXT_STATUS_PTR(engine),
2135 FW_REG_READ | FW_REG_WRITE);
2136
2137 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2138 RING_CONTEXT_STATUS_BUF_BASE(engine),
2139 FW_REG_READ);
2140
2141 engine->fw_domains = fw_domains;
2142
Chris Wilsone1382ef2016-05-06 15:40:20 +01002143 INIT_LIST_HEAD(&engine->active_list);
2144 INIT_LIST_HEAD(&engine->request_list);
2145 INIT_LIST_HEAD(&engine->buffers);
2146 INIT_LIST_HEAD(&engine->execlist_queue);
2147 spin_lock_init(&engine->execlist_lock);
2148
2149 tasklet_init(&engine->irq_tasklet,
2150 intel_lrc_irq_handler, (unsigned long)engine);
2151
2152 logical_ring_init_platform_invariants(engine);
2153 logical_ring_default_vfuncs(engine);
2154 logical_ring_default_irqs(engine, info->irq_shift);
2155
2156 intel_engine_init_hangcheck(engine);
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002157 i915_gem_batch_pool_init(dev_priv->dev, &engine->batch_pool);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002158
2159 return engine;
2160}
2161
Oscar Mateo73e4d072014-07-24 17:04:48 +01002162/**
2163 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2164 * @dev: DRM device.
2165 *
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002166 * This function inits the engines for an Execlists submission style (the
2167 * equivalent in the legacy ringbuffer submission world would be
2168 * i915_gem_init_engines). It does this only for those engines that are present in
2169 * the hardware.
Oscar Mateo73e4d072014-07-24 17:04:48 +01002170 *
2171 * Return: non-zero if the initialization failed.
2172 */
Oscar Mateo454afeb2014-07-24 17:04:22 +01002173int intel_logical_rings_init(struct drm_device *dev)
2174{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002175 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01002176 unsigned int mask = 0;
2177 unsigned int i;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002178 int ret;
2179
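	/*
	 * Sanity check: every bit in the device's ring_mask should correspond
	 * to an engine known to this table, i.e. no bits may be set at or
	 * above I915_NUM_ENGINES.
	 */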
	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		if (!logical_rings[i].init)
			continue;

		ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
		if (ret)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the logical_rings table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
		struct intel_device_info *info =
			(struct intel_device_info *)&dev_priv->info;
		info->ring_mask = mask;
	}

	return 0;

cleanup:
	for (i = 0; i < I915_NUM_ENGINES; i++)
		intel_logical_ring_cleanup(&dev_priv->engine[i]);

	return ret;
}

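/*
 * make_rpcs() builds the value programmed into R_PWR_CLK_STATE in the
 * context image (see populate_lr_context() below). As a purely illustrative
 * example (not a real SKU): a Gen9 part with slice power gating and
 * slice_total == 3 would contribute
 * GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | (3 << GEN8_RPCS_S_CNT_SHIFT).
 */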
static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (INTEL_INFO(dev_priv)->has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
			GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
			GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_eu_pg) {
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}

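/*
 * INDIRECT_CTX_OFFSET tells the CS at which point of the context restore it
 * should execute the indirect (workaround) context batch; the default value
 * differs per Gen.
 */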
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}

static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	void *vaddr;
	u32 *reg_state;
	int ret;

	if (!ppgtt)
		ppgtt = dev_priv->mm.aliasing_ppgtt;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
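	/* We write the context image through the CPU map below, so mark the
	 * backing pages dirty for writeback. */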
	ctx_obj->dirty = true;

	/* The state page of the context object (page LRC_STATE_PN) contains
	 * fields which must be set up prior to the first execution. */
	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
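	/* The layout written below is therefore roughly:
	 *   MI_LOAD_REGISTER_IMM(N), then N (reg offset, value) pairs,
	 *   MI_LOAD_REGISTER_IMM(9), then 9 (reg offset, value) pairs, ...
	 * with N == 14 for the render engine and 11 for the others. */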
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	/* Ring buffer start address is not known until the buffer is pinned.
	 * It is written to the context image in execlists_update_context().
	 */
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.obj) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

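			/*
			 * The workaround batches are cacheline (64B) aligned,
			 * so the low bits of the GGTT pointer are reused to
			 * encode the batch size in cachelines.
			 */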
			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address of the PML4, and
		 * the other PDP descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of the space
		 * supported. With dynamic page allocation, PDPs may not be
		 * allocated at this point. Point the unallocated PDPs to the
		 * scratch page.
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
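/*
 * Note that the only caller in this file, execlists_context_deferred_alloc()
 * below, rounds this size up to page granularity and adds the extra page of
 * GuC shared data on top.
 */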

/**
 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @engine: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we plan
 * to use the context with them. The context backing objects and the ringbuffers
 * (especially the ringbuffer backing objects) take up a lot of memory, which is
 * why creation is deferred: it is better to make sure first that we actually
 * need to use a given engine with the context.
 *
 * Return: non-zero on error.
 */
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ce->state);

	context_size = round_up(intel_lr_context_size(engine), 4096);

	/* One extra page as the sharing data between driver and GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
	if (IS_ERR(ringbuf)) {
		ret = PTR_ERR(ringbuf);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ringbuf;
	}

	ce->ringbuf = ringbuf;
	ce->state = ctx_obj;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ringbuf:
	intel_ringbuffer_free(ringbuf);
error_deref_obj:
	drm_gem_object_unreference(&ctx_obj->base);
	ce->ringbuf = NULL;
	ce->state = NULL;
	return ret;
}

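/**
 * intel_lr_context_reset() - reset the ring state cached in a context image
 * @dev_priv: i915 device private
 * @ctx: context whose per-engine images should be reset
 *
 * Zero the ring HEAD and TAIL recorded in each engine's context image (and
 * in the corresponding intel_ringbuffer), so that restarting the engines
 * after a reset does not replay stale ring contents.
 */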
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		struct drm_i915_gem_object *ctx_obj = ce->state;
		void *vaddr;
		uint32_t *reg_state;

		if (!ctx_obj)
			continue;

		vaddr = i915_gem_object_pin_map(ctx_obj);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;

		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
		ctx_obj->dirty = true;

		reg_state[CTX_RING_HEAD+1] = 0;
		reg_state[CTX_RING_TAIL+1] = 0;

		i915_gem_object_unpin_map(ctx_obj);

		ce->ringbuf->head = 0;
		ce->ringbuf->tail = 0;
	}
}