/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things in the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs
 * (a small illustrative sketch of this rule follows this comment block).
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this
 * second context will be at the head of the queue when we remove the first
 * one. This request will then be resubmitted along with a new request for a
 * different context, which will cause the hardware to continue executing the
 * second request and queue the new request (the GPU detects the condition of a
 * context getting preempted with the same context and optimizes the context
 * switch flow by not doing preemption, but just sampling the new tail pointer).
 *
 */
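
/*
 * Illustrative sketch only (not driver code; hypothetical types and helper
 * name): the ELSP pair-picking rule described above, reduced to a simple
 * queue of requests tagged with a context ID. Leading requests that share
 * a context collapse into one submission (the later tail supersedes the
 * earlier one), and the second ELSP slot stays NULL if no request with a
 * different context follows.
 */
#if 0
struct example_req {
	u32 ctx_id;			/* 20-bit submission ID */
	struct example_req *next;	/* singly-linked submission queue */
};

static void example_pick_elsp_pair(struct example_req *head,
				   struct example_req **slot0,
				   struct example_req **slot1)
{
	/* Collapse leading requests that share a context. */
	while (head->next && head->next->ctx_id == head->ctx_id)
		head = head->next;

	*slot0 = head;		/* always submit something */
	*slot1 = head->next;	/* NULL when only one context is queued */
}
#endif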

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {		\
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

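/*
 * Usage note: for a context using 32b PPGTT, execlists_update_context()
 * below refreshes all four PDP pairs in the context image on every
 * submission, e.g. ASSIGN_CTX_PDP(ppgtt, reg_state, 3). With full 48b
 * PPGTT only the PML4 pointer is needed (ASSIGN_CTX_PML4), and it is
 * written once at context init rather than per submission.
 */
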
enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
					   struct drm_i915_gem_object *default_ctx_obj);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
		return 1;

	if (INTEL_INFO(dev)->gen >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}
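
/*
 * In short, the checks above mean: execlists are compulsory for vGPU and
 * for gen9+, disabled when the module parameter is 0, and otherwise
 * enabled whenever the platform has logical ring contexts, PPGTT is in
 * use and MMIO flips have not been explicitly disabled.
 */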

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;

	if (IS_GEN8(dev) || IS_GEN9(dev))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
	if (IS_GEN8(dev))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					  for a pinned context
 *
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits 0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31:   LRCA, GTT address of (the HWSP of) this context
 *    bits 32-51:   ctx ID, a globally unique tag (the LRCA again!)
 *    bits 52-63:   reserved, may encode the engine ID (for GuC)
 *
 * (An illustrative round-trip sketch follows the function below.)
 */
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	uint64_t lrca, desc;

	lrca = ctx->engine[engine->id].lrc_vma->node.start +
	       LRC_PPHWSP_PN * PAGE_SIZE;

	desc = engine->ctx_desc_template;		   /* bits  0-11 */
	desc |= lrca;					   /* bits 12-31 */
	desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */

	ctx->engine[engine->id].lrc_desc = desc;
}
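
/*
 * Illustrative only (hypothetical helper, never compiled): the ctx ID that
 * the hardware later reports in the context status buffer is just the top
 * half of the descriptor built above, which is why intel_execlists_ctx_id()
 * below can recover it with a single shift.
 */
#if 0
static u32 example_ctx_id_roundtrip(u64 ctx_desc_template, u64 lrca)
{
	u64 desc = ctx_desc_template |				      /* bits  0-11 */
		   lrca |					      /* bits 12-31 */
		   ((lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT);	      /* bits 32-51 */

	return desc >> GEN8_CTX_ID_SHIFT;	/* == lrca >> PAGE_SHIFT */
}
#endif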

uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

/**
 * intel_execlists_ctx_id() - get the Execlists Context ID
 * @ctx: Context to get the ID for
 * @engine: Engine to get the ID for
 *
 * Do not confuse with ctx->id! Unfortunately we have a name overload
 * here: the old context ID we pass to userspace as a handle so that
 * they can refer to a context, and the new context ID we pass to the
 * ELSP so that the GPU can inform us of the context status via
 * interrupts.
 *
 * The context ID is a portion of the context descriptor, so we can
 * just extract the required part from the cached descriptor.
 *
 * Return: 20-bit globally unique context ID.
 */
u32 intel_execlists_ctx_id(struct intel_context *ctx,
			   struct intel_engine_cs *engine)
{
	return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;

	/* BUG_ON(!irqs_disabled());  */

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock(&dev_priv->uncore.lock);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_move_tail(&req0->execlist_link,
				       &engine->execlist_retired_req_list);
			req0 = cursor;
		} else {
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		struct intel_ringbuffer *ringbuf;

		ringbuf = req0->ctx->engine[engine->id].ringbuf;
		req0->tail += 8;
		req0->tail &= ringbuf->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (!head_req)
		return 0;

	if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	list_move_tail(&head_req->execlist_link,
		       &engine->execlist_retired_req_list);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @engine: Engine Command Streamer to handle.
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
void intel_lrc_irq_handler(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	spin_lock(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock(&dev_priv->uncore.lock);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		/* Check the status of the last event we read out (the loop
		 * index would be one past the end here). */
		if (!engine->disable_lite_restore_wa ||
		    (csb[csb_read - 1][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}
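
/*
 * Worked example (illustrative numbers) of the CSB pointer arithmetic in
 * the handler above, assuming GEN8_CSB_ENTRIES == 6: if the driver last
 * stopped at read_pointer == 4 and the hardware reports write_pointer == 1,
 * the write pointer has wrapped and is biased to 7, so entries 5, 0 and 1
 * (5 % 6, 6 % 6, 7 % 6) are read out, and the read pointer is written back
 * as 7 % 6 == 1.
 */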

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	if (request->ctx != request->i915->kernel_context)
		intel_lr_context_pin(request->ctx, engine);

	i915_gem_request_reference(request);

	spin_lock_irq(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_move_tail(&tail_req->execlist_link,
				       &engine->execlist_retired_req_list);
		}
	}

	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	if (num_elements == 0)
		execlists_context_unqueue(engine);

	spin_unlock_irq(&engine->execlist_lock);
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (engine->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret = 0;

	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		struct intel_guc *guc = &request->i915->guc;

		ret = i915_guc_wq_check_space(guc->execbuf_client);
		if (ret)
			return ret;
	}

	if (request->ctx != request->i915->kernel_context)
		ret = intel_lr_context_pin(request->ctx, request->engine);

	return ret;
}

static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
				       int bytes)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *target;
	unsigned space;
	int ret;

	if (intel_ring_space(ringbuf) >= bytes)
		return 0;

	/* The whole point of reserving space is to not wait! */
	WARN_ON(ringbuf->reserved_in_use);

	list_for_each_entry(target, &engine->request_list, list) {
		/*
		 * The request queue is per-engine, so can contain requests
		 * from multiple ringbuffers. Here, we must ignore any that
		 * aren't from the ringbuffer we're considering.
		 */
		if (target->ringbuf != ringbuf)
			continue;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ringbuf->tail,
					   ringbuf->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->list == &engine->request_list))
		return -ENOSPC;

	ret = i915_wait_request(target);
	if (ret)
		return ret;

	ringbuf->space = space;
	return 0;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct drm_i915_private *dev_priv = request->i915;
	struct intel_engine_cs *engine = request->engine;

	intel_logical_ring_advance(ringbuf);
	request->tail = ringbuf->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	if (intel_engine_stopped(engine))
		return 0;

	if (engine->last_context != request->ctx) {
		if (engine->last_context)
			intel_lr_context_unpin(engine->last_context, engine);
		if (request->ctx != request->i915->kernel_context) {
			intel_lr_context_pin(request->ctx, engine);
			engine->last_context = request->ctx;
		} else {
			engine->last_context = NULL;
		}
	}

	if (dev_priv->guc.execbuf_client)
		i915_guc_submit(dev_priv->guc.execbuf_client, request);
	else
		execlists_context_queue(request);

	return 0;
}

static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	intel_ring_update_space(ringbuf);
}

static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int remain_usable = ringbuf->effective_size - ringbuf->tail;
	int remain_actual = ringbuf->size - ringbuf->tail;
	int ret, total_bytes, wait_bytes = 0;
	bool need_wrap = false;

	if (ringbuf->reserved_in_use)
		total_bytes = bytes;
	else
		total_bytes = bytes + ringbuf->reserved_size;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else {
		if (unlikely(total_bytes > remain_usable)) {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So only need to wait for the
			 * reserved size after flushing out the remainder.
			 */
			wait_bytes = remain_actual + ringbuf->reserved_size;
			need_wrap = true;
		} else if (total_bytes > ringbuf->space) {
			/* No wrapping required, just waiting. */
			wait_bytes = total_bytes;
		}
	}

	if (wait_bytes) {
		ret = logical_ring_wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;

		if (need_wrap)
			__wrap_ring_buffer(ringbuf);
	}

	return 0;
}
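
/*
 * Worked example (illustrative numbers) for logical_ring_prepare(): with
 * size == effective_size == 4096 and tail == 4000, only 96 bytes remain
 * before the wrap point, so a 200-byte request cannot fit. We then wait
 * until those 96 remainder bytes plus the full (base + reserved) total
 * have been freed, fill the remainder with MI_NOOPs in
 * __wrap_ring_buffer(), and start the request at tail == 0.
 */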

/**
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
 *
 * @req: The request to start some new work for
 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
 *
 * The ringbuffer might not be ready to accept the commands right away (maybe
 * it needs to be wrapped, or we need to wait a bit for the tail to be updated).
 * This function takes care of that and also preallocates a request (every
 * workload submission is still mediated through requests, just as with legacy
 * ringbuffer submission).
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct drm_i915_private *dev_priv;
	int ret;

	WARN_ON(req == NULL);
	dev_priv = req->i915;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}
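
/*
 * Typical usage of the begin/emit/advance pattern (illustrative fragment,
 * never compiled; it mirrors the INSTPM update done in
 * intel_execlists_submission() further below):
 */
#if 0
	ret = intel_logical_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
	intel_logical_ring_emit_reg(ringbuf, INSTPM);
	intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
	intel_logical_ring_advance(ringbuf);
#endif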
915
John Harrisonccd98fe2015-05-29 17:44:09 +0100916int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
917{
918 /*
919 * The first call merely notes the reserve request and is common for
920 * all back ends. The subsequent localised _begin() call actually
921 * ensures that the reservation is available. Without the begin, if
922 * the request creator immediately submitted the request without
923 * adding any commands to it then there might not actually be
924 * sufficient room for the submission commands.
925 */
926 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
927
928 return intel_logical_ring_begin(request, 0);
929}
930
Oscar Mateo73e4d072014-07-24 17:04:48 +0100931/**
932 * execlists_submission() - submit a batchbuffer for execution, Execlists style
933 * @dev: DRM device.
934 * @file: DRM file.
935 * @ring: Engine Command Streamer to submit to.
936 * @ctx: Context to employ for this submission.
937 * @args: execbuffer call arguments.
938 * @vmas: list of vmas.
939 * @batch_obj: the batchbuffer to submit.
940 * @exec_start: batchbuffer start virtual address pointer.
John Harrison8e004ef2015-02-13 11:48:10 +0000941 * @dispatch_flags: translated execbuffer call flags.
Oscar Mateo73e4d072014-07-24 17:04:48 +0100942 *
943 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
944 * away the submission details of the execbuffer ioctl call.
945 *
946 * Return: non-zero if the submission fails.
947 */
John Harrison5f19e2b2015-05-29 17:43:27 +0100948int intel_execlists_submission(struct i915_execbuffer_params *params,
Oscar Mateo454afeb2014-07-24 17:04:22 +0100949 struct drm_i915_gem_execbuffer2 *args,
John Harrison5f19e2b2015-05-29 17:43:27 +0100950 struct list_head *vmas)
Oscar Mateo454afeb2014-07-24 17:04:22 +0100951{
John Harrison5f19e2b2015-05-29 17:43:27 +0100952 struct drm_device *dev = params->dev;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000953 struct intel_engine_cs *engine = params->engine;
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100954 struct drm_i915_private *dev_priv = dev->dev_private;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000955 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
John Harrison5f19e2b2015-05-29 17:43:27 +0100956 u64 exec_start;
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100957 int instp_mode;
958 u32 instp_mask;
959 int ret;
960
961 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
962 instp_mask = I915_EXEC_CONSTANTS_MASK;
963 switch (instp_mode) {
964 case I915_EXEC_CONSTANTS_REL_GENERAL:
965 case I915_EXEC_CONSTANTS_ABSOLUTE:
966 case I915_EXEC_CONSTANTS_REL_SURFACE:
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000967 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100968 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
969 return -EINVAL;
970 }
971
972 if (instp_mode != dev_priv->relative_constants_mode) {
973 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
974 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
975 return -EINVAL;
976 }
977
978 /* The HW changed the meaning on this bit on gen6 */
979 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
980 }
981 break;
982 default:
983 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
984 return -EINVAL;
985 }
986
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100987 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
988 DRM_DEBUG("sol reset is gen7 only\n");
989 return -EINVAL;
990 }
991
John Harrison535fbe82015-05-29 17:43:32 +0100992 ret = execlists_move_to_gpu(params->request, vmas);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100993 if (ret)
994 return ret;
995
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +0000996 if (engine == &dev_priv->engine[RCS] &&
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100997 instp_mode != dev_priv->relative_constants_mode) {
John Harrison4d616a22015-05-29 17:44:08 +0100998 ret = intel_logical_ring_begin(params->request, 4);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +0100999 if (ret)
1000 return ret;
1001
1002 intel_logical_ring_emit(ringbuf, MI_NOOP);
1003 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
Ville Syrjäläf92a9162015-11-04 23:20:07 +02001004 intel_logical_ring_emit_reg(ringbuf, INSTPM);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +01001005 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
1006 intel_logical_ring_advance(ringbuf);
1007
1008 dev_priv->relative_constants_mode = instp_mode;
1009 }
1010
John Harrison5f19e2b2015-05-29 17:43:27 +01001011 exec_start = params->batch_obj_vm_offset +
1012 args->batch_start_offset;
1013
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001014 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +01001015 if (ret)
1016 return ret;
1017
John Harrison95c24162015-05-29 17:43:31 +01001018 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
John Harrison5e4be7b2015-02-13 11:48:11 +00001019
John Harrison8a8edb52015-05-29 17:43:33 +01001020 i915_gem_execbuffer_move_to_active(vmas, params->request);
John Harrisonadeca762015-05-29 17:43:28 +01001021 i915_gem_execbuffer_retire_commands(params);
Oscar Mateoba8b7cc2014-07-24 17:04:33 +01001022
Oscar Mateo454afeb2014-07-24 17:04:22 +01001023 return 0;
1024}
1025
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001026void intel_execlists_retire_requests(struct intel_engine_cs *engine)
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001027{
Nick Hoath6d3d8272015-01-15 13:10:39 +00001028 struct drm_i915_gem_request *req, *tmp;
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001029 struct list_head retired_list;
1030
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001031 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1032 if (list_empty(&engine->execlist_retired_req_list))
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001033 return;
1034
1035 INIT_LIST_HEAD(&retired_list);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001036 spin_lock_irq(&engine->execlist_lock);
1037 list_replace_init(&engine->execlist_retired_req_list, &retired_list);
1038 spin_unlock_irq(&engine->execlist_lock);
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001039
1040 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
Daniel Vetteraf3302b2015-12-04 17:27:15 +01001041 struct intel_context *ctx = req->ctx;
1042 struct drm_i915_gem_object *ctx_obj =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001043 ctx->engine[engine->id].state;
Daniel Vetteraf3302b2015-12-04 17:27:15 +01001044
Dave Gordoned54c1a2016-01-19 19:02:54 +00001045 if (ctx_obj && (ctx != req->i915->kernel_context))
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001046 intel_lr_context_unpin(ctx, engine);
Tvrtko Ursuline52928232016-01-28 10:29:54 +00001047
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001048 list_del(&req->execlist_link);
Nick Hoathf8210792015-01-29 16:55:07 +00001049 i915_gem_request_unreference(req);
Thomas Danielc86ee3a92014-11-13 10:27:05 +00001050 }
1051}
1052
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (!engine->gpu_caches_dirty)
		return 0;

	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int intel_lr_context_do_pin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
	struct page *lrc_state_page;
	uint32_t *lrc_reg_state;
	int ret;

	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		return ret;

	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
	if (WARN_ON(!lrc_state_page)) {
		ret = -ENODEV;
		goto unpin_ctx_obj;
	}

	ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
	if (ret)
		goto unpin_ctx_obj;

	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
	intel_lr_context_descriptor_update(ctx, engine);
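	/*
	 * The HW reads the ring buffer address out of the context image,
	 * so refresh it now that the ringbuffer has been (re)pinned.
	 */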
	lrc_reg_state = kmap(lrc_state_page);
	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
	ctx_obj->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return ret;

unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ctx_obj);

	return ret;
}

static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine)
{
	int ret = 0;

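	/*
	 * The first pin on a given engine does the actual pinning and takes
	 * a reference on the context so it cannot be freed while pinned;
	 * both are dropped again on the last unpin.
	 */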
	if (ctx->engine[engine->id].pin_count++ == 0) {
		ret = intel_lr_context_do_pin(ctx, engine);
		if (ret)
			goto reset_pin_count;

		i915_gem_context_reference(ctx);
	}
	return ret;

reset_pin_count:
	ctx->engine[engine->id].pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct intel_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;

	WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
	if (--ctx->engine[engine->id].pin_count == 0) {
		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
		i915_gem_object_ggtt_unpin(ctx_obj);
		ctx->engine[engine->id].lrc_vma = NULL;
		ctx->engine[engine->id].lrc_desc = 0;
		ctx->engine[engine->id].lrc_reg_state = NULL;

		i915_gem_context_unreference(ctx);
	}
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

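	/*
	 * One MI_LOAD_REGISTER_IMM header, one register/value pair per
	 * workaround and a trailing MI_NOOP: 2 * w->count + 2 dwords.
	 */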
	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
		intel_logical_ring_emit(ringbuf, w->reg[i].value);
	}
	intel_logical_ring_emit(ringbuf, MI_NOOP);

	intel_logical_ring_advance(ringbuf);

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	return 0;
}

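/*
 * Emit one dword into the workaround batch page. Note that on overflow this
 * returns -ENOSPC from the *calling* function, since wa_ctx_emit() is a macro.
 */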
#define wa_ctx_emit(batch, index, cmd)					\
	do {								\
		int __index = (index)++;				\
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC;					\
		}							\
		batch[__index] = (cmd);					\
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly,
 * but there is a slight complication as this is applied in a WA batch where the
 * values are only initialized once, so we cannot take the register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit 21 set and then restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We could of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest, but that would make the WA more
 * complicated.
 *
 * This WA is also required for Gen9, so extracting it as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *const batch,
						uint32_t index)
{
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:skl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

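	/* Save the current GEN8_L3SQCREG4 value to the scratch page. */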
	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

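	/* Overwrite the register with the constant flush value. */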
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);

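	/* Stalling flush while the new value is in effect. */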
	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_DC_FLUSH_ENABLE));
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

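	/* Restore the saved register value from the scratch page. */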
	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

	return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/**
 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
 *
 * @engine: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 * @batch: page in which WA are loaded
 * @offset: specifies the start of the batch; it should be cache-aligned,
 *          otherwise it is adjusted accordingly. It is updated with the
 *          offset value received as input.
 *
 * Typically we only have one indirect_ctx and one per_ctx batch buffer, which
 * are initialized at the beginning and shared across all contexts, but this
 * field helps us to have multiple batches at different offsets and select them
 * based on a criteria. At the moment this batch always starts at the beginning
 * of the page and we don't have multiple wa_ctx batch buffers.
 *
 * The number of WA applied is not known at the beginning; we use @offset to
 * return the number of DWORDS written. Note that the HW expects the size in
 * terms of cachelines.
 *
 * This batch does not contain MI_BATCH_BUFFER_END, so NOOPs are added as
 * padding to make it cacheline-aligned. MI_BATCH_BUFFER_END will be added to
 * the per_ctx batch, and both of them together make a complete batch buffer.
 *
 * Return: non-zero if we exceed the PAGE_SIZE limit.
 */
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *const batch,
				    uint32_t *offset)
{
	uint32_t scratch_addr;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->dev)) {
		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
		if (rc < 0)
			return rc;
		index = rc;
	}

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
				   PIPE_CONTROL_GLOBAL_GTT_IVB |
				   PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_QW_WRITE));
	wa_ctx_emit(batch, index, scratch_addr);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

/**
 * gen8_init_perctx_bb() - initialize per ctx batch with WA
 *
 * @engine: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 * @batch: page in which WA are loaded
 * @offset: specifies the start of this batch, which is started immediately
 *          after the indirect_ctx batch. Since we ensure that indirect_ctx
 *          ends on a cacheline, this batch is aligned automatically.
 *
 * The number of DWORDS written is returned using this field.
 *
 * This batch is terminated with MI_BATCH_BUFFER_END, so we need not add
 * padding to align it with a cacheline, as padding after MI_BATCH_BUFFER_END
 * is redundant.
 */
static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *const batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *const batch,
				    uint32_t *offset)
{
	int ret;
	struct drm_device *dev = engine->dev;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
	ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
	if (ret < 0)
		return ret;
	index = ret;

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *const batch,
			       uint32_t *offset)
{
	struct drm_device *dev = engine->dev;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
		wa_ctx_emit(batch, index,
			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
	int ret;

	engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
						   PAGE_ALIGN(size));
	if (!engine->wa_ctx.obj) {
		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
		return -ENOMEM;
	}

	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
				 ret);
		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
		return ret;
	}

	return 0;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
	if (engine->wa_ctx.obj) {
		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
		drm_gem_object_unreference(&engine->wa_ctx.obj->base);
		engine->wa_ctx.obj = NULL;
	}
}

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	int ret;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;

	WARN_ON(engine->id != RCS);

	/* update this when WA for higher Gen are added */
	if (INTEL_INFO(engine->dev)->gen > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_INFO(engine->dev)->gen);
		return 0;
	}

	/* some WA perform writes to scratch page, ensure it is valid */
	if (engine->scratch.obj == NULL) {
		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (INTEL_INFO(engine->dev)->gen == 8) {
		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (INTEL_INFO(engine->dev)->gen == 9) {
		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int next_context_status_buffer_hw;

	lrc_setup_hardware_status_page(engine,
				       dev_priv->kernel_context->engine[engine->id].state);

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(engine),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(engine));

	/*
	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
	 * zero, we need to read the write pointer from hardware and use its
	 * value because "this register is power context save restored".
	 * Effectively, these states have been observed:
	 *
	 *	| Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
	 * BDW  | CSB regs not reset       | CSB regs reset       |
	 * CHT  | CSB regs not reset       | CSB regs not reset   |
	 * SKL  |         ?                |         ?            |
	 * BXT  |         ?                |         ?            |
	 */
	next_context_status_buffer_hw =
		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));

	/*
	 * When the CSB registers are reset (also after power-up / gpu reset),
	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
	 * this special case, so the first element read is CSB[0].
	 */
	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);

	engine->next_context_status_buffer = next_context_status_buffer_hw;
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);

	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));

	return 0;
}

static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return init_workarounds_ring(engine);
}

static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	return init_workarounds_ring(engine);
}

static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
	struct intel_engine_cs *engine = req->engine;
	struct intel_ringbuffer *ringbuf = req->ringbuf;
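	/*
	 * Each of the GEN8_LEGACY_PDPES page-directory pointers is written
	 * as an upper/lower dword pair, hence two LRI entries per PDP.
	 */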
	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
	int i, ret;

	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		intel_logical_ring_emit_reg(ringbuf,
					    GEN8_RING_PDP_UDW(engine, i));
		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
		intel_logical_ring_emit_reg(ringbuf,
					    GEN8_RING_PDP_LDW(engine, i));
		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
	}

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, unsigned dispatch_flags)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	/* Don't rely on hw updating PDPs, especially in lite-restore.
	 * Ideally, we should set Force PD Restore in ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit. */
	if (req->ctx->ppgtt &&
	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
		    !intel_vgpu_active(req->i915->dev)) {
			ret = intel_logical_ring_emit_pdps(req);
			if (ret)
				return ret;
		}

		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
	}

	ret = intel_logical_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
				(ppgtt<<8) |
				(dispatch_flags & I915_DISPATCH_RS ?
				 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (engine->irq_refcount++ == 0) {
		I915_WRITE_IMR(engine,
			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
		POSTING_READ(RING_IMR(engine->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
{
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--engine->irq_refcount == 0) {
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
		POSTING_READ(RING_IMR(engine->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int gen8_emit_flush(struct drm_i915_gem_request *request,
			   u32 invalidate_domains,
			   u32 unused)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct intel_engine_cs *engine = ringbuf->engine;
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

	ret = intel_logical_ring_begin(request, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW + 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
		cmd |= MI_INVALIDATE_TLB;
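		/* The video engine additionally needs its BSD TLB invalidated. */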
		if (engine == &dev_priv->engine[VCS])
			cmd |= MI_INVALIDATE_BSD;
	}

	intel_logical_ring_emit(ringbuf, cmd);
	intel_logical_ring_emit(ringbuf,
				I915_GEM_HWS_SCRATCH_ADDR |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
	intel_logical_ring_emit(ringbuf, 0); /* value */
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
				  u32 invalidate_domains,
				  u32 flush_domains)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct intel_engine_cs *engine = ringbuf->engine;
	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	bool vf_flush_wa = false;
	u32 flags = 0;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (IS_GEN9(engine->dev))
			vf_flush_wa = true;
	}

	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
	if (ret)
		return ret;

	if (vf_flush_wa) {
		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
		intel_logical_ring_emit(ringbuf, 0);
		intel_logical_ring_emit(ringbuf, 0);
		intel_logical_ring_emit(ringbuf, 0);
		intel_logical_ring_emit(ringbuf, 0);
		intel_logical_ring_emit(ringbuf, 0);
	}

	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
	intel_logical_ring_emit(ringbuf, flags);
	intel_logical_ring_emit(ringbuf, scratch_addr);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_advance(ringbuf);

	return 0;
}

static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}

static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
			   bool lazy_coherency)
{
	/*
	 * On BXT A steppings there is a HW coherency issue whereby the
	 * MI_STORE_DATA_IMM storing the completed request's seqno
	 * occasionally doesn't invalidate the CPU cache. Work around this by
	 * clflushing the corresponding cacheline whenever the caller wants
	 * the coherency to be guaranteed. Note that this cacheline is known
	 * to be clean at this point, since we only write it in
	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
	 * this clflush in practice becomes an invalidate operation.
	 */
	if (!lazy_coherency)
		intel_flush_status_page(engine, I915_GEM_HWS_INDEX);

	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);

	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
	intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
#define WA_TAIL_DWORDS 2

static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}

static int gen8_emit_request(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	int ret;

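	/*
	 * 6 dwords for the flush/seqno write, plus WA_TAIL_DWORDS of
	 * padding for WaIdleLiteRestore (see the definition above).
	 */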
	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
	if (ret)
		return ret;

	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));

	intel_logical_ring_emit(ringbuf,
				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
	intel_logical_ring_emit(ringbuf,
				hws_seqno_address(request->engine) |
				MI_FLUSH_DW_USE_GTT);
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	return intel_logical_ring_advance_and_submit(request);
}

static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	int ret;

	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
	if (ret)
		return ret;

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
	intel_logical_ring_emit(ringbuf,
				(PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_CS_STALL |
				 PIPE_CONTROL_QW_WRITE));
	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
	intel_logical_ring_emit(ringbuf, 0);
	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
	return intel_logical_ring_advance_and_submit(request);
}

static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
{
	struct render_state so;
	int ret;

	ret = i915_gem_render_state_prepare(req->engine, &so);
	if (ret)
		return ret;

	if (so.rodata == NULL)
		return 0;

	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
					 I915_DISPATCH_SECURE);
	if (ret)
		goto out;

	ret = req->engine->emit_bb_start(req,
					 (so.ggtt_offset + so.aux_batch_offset),
					 I915_DISPATCH_SECURE);
	if (ret)
		goto out;

	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);

out:
	i915_gem_render_state_fini(&so);
	return ret;
}

static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_logical_ring_workarounds_emit(req);
	if (ret)
		return ret;

	ret = intel_rcs_context_init_mocs(req);
	/*
	 * Failing to program the MOCS is non-fatal: the system will not
	 * run at peak performance, so generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");

	return intel_lr_context_render_state_init(req);
}

/**
 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
 *
 * @engine: Engine Command Streamer.
 */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv;

	if (!intel_engine_initialized(engine))
		return;

	dev_priv = engine->dev->dev_private;

	if (engine->buffer) {
		intel_logical_ring_stop(engine);
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
	}

	if (engine->cleanup)
		engine->cleanup(engine);

	i915_cmd_parser_fini_ring(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->status_page.obj) {
		kunmap(sg_page(engine->status_page.obj->pages->sgl));
		engine->status_page.obj = NULL;
	}

	engine->idle_lite_restore_wa = 0;
	engine->disable_lite_restore_wa = false;
	engine->ctx_desc_template = 0;

	lrc_destroy_wa_ctx_obj(engine);
	engine->dev = NULL;
}

static void
logical_ring_default_vfuncs(struct drm_device *dev,
			    struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
	engine->init_hw = gen8_init_common_ring;
	engine->emit_request = gen8_emit_request;
	engine->emit_flush = gen8_emit_flush;
	engine->irq_get = gen8_logical_ring_get_irq;
	engine->irq_put = gen8_logical_ring_put_irq;
	engine->emit_bb_start = gen8_emit_bb_start;
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		engine->get_seqno = bxt_a_get_seqno;
		engine->set_seqno = bxt_a_set_seqno;
	} else {
		engine->get_seqno = gen8_get_seqno;
		engine->set_seqno = gen8_set_seqno;
	}
}

static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}

static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
	struct intel_context *dctx = to_i915(dev)->kernel_context;
	int ret;

	/* Intentionally left blank. */
	engine->buffer = NULL;

	engine->dev = dev;
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
	i915_gem_batch_pool_init(dev, &engine->batch_pool);
	init_waitqueue_head(&engine->irq_queue);

	INIT_LIST_HEAD(&engine->buffers);
	INIT_LIST_HEAD(&engine->execlist_queue);
	INIT_LIST_HEAD(&engine->execlist_retired_req_list);
	spin_lock_init(&engine->execlist_lock);

	logical_ring_init_platform_invariants(engine);

	ret = i915_cmd_parser_init_ring(engine);
	if (ret)
		goto error;

	ret = intel_lr_context_deferred_alloc(dctx, engine);
	if (ret)
		goto error;

	/* As this is the default context, always pin it */
	ret = intel_lr_context_do_pin(dctx, engine);
	if (ret) {
		DRM_ERROR(
			"Failed to pin and map ringbuffer %s: %d\n",
			engine->name, ret);
		goto error;
	}

	return 0;

error:
	intel_logical_ring_cleanup(engine);
	return ret;
}

2118static int logical_render_ring_init(struct drm_device *dev)
2119{
2120 struct drm_i915_private *dev_priv = dev->dev_private;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00002121 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
Daniel Vetter99be1df2014-11-20 00:33:06 +01002122 int ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002123
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002124 engine->name = "render ring";
2125 engine->id = RCS;
2126 engine->exec_id = I915_EXEC_RENDER;
2127 engine->guc_id = GUC_RENDER_ENGINE;
2128 engine->mmio_base = RENDER_RING_BASE;
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00002129
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002130 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
Oscar Mateo73d477f2014-07-24 17:04:31 +01002131 if (HAS_L3_DPF(dev))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002132 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002133
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002134 logical_ring_default_vfuncs(dev, engine);
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00002135
2136 /* Override some for render ring. */
Damien Lespiau82ef8222015-02-09 19:33:08 +00002137 if (INTEL_INFO(dev)->gen >= 9)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002138 engine->init_hw = gen9_init_render_ring;
Damien Lespiau82ef8222015-02-09 19:33:08 +00002139 else
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002140 engine->init_hw = gen8_init_render_ring;
2141 engine->init_context = gen8_init_rcs_context;
2142 engine->cleanup = intel_fini_pipe_control;
2143 engine->emit_flush = gen8_emit_flush_render;
2144 engine->emit_request = gen8_emit_request_render;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01002145
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002146 engine->dev = dev;
Arun Siluveryc4db7592015-06-19 18:37:11 +01002147
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002148 ret = intel_init_pipe_control(engine);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002149 if (ret)
2150 return ret;
2151
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002152 ret = intel_init_workaround_bb(engine);
        if (ret) {
                /*
                 * We continue even if we fail to initialize the WA batch
                 * buffer, since we only expect rare glitches from it and
                 * nothing critical enough to prevent us from using the GPU.
                 */
                DRM_ERROR("WA batch buffer initialization failed: %d\n",
                          ret);
        }

        ret = logical_ring_init(dev, engine);
        if (ret)
                lrc_destroy_wa_ctx_obj(engine);

        return ret;
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine = &dev_priv->engine[VCS];

        engine->name = "bsd ring";
        engine->id = VCS;
        engine->exec_id = I915_EXEC_BSD;
        engine->guc_id = GUC_VIDEO_ENGINE;
        engine->mmio_base = GEN6_BSD_RING_BASE;

        logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
        logical_ring_default_vfuncs(dev, engine);

        return logical_ring_init(dev, engine);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine = &dev_priv->engine[VCS2];

        engine->name = "bsd2 ring";
        engine->id = VCS2;
        engine->exec_id = I915_EXEC_BSD;
        engine->guc_id = GUC_VIDEO_ENGINE2;
        engine->mmio_base = GEN8_BSD2_RING_BASE;

        logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
        logical_ring_default_vfuncs(dev, engine);

        return logical_ring_init(dev, engine);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine = &dev_priv->engine[BCS];

        engine->name = "blitter ring";
        engine->id = BCS;
        engine->exec_id = I915_EXEC_BLT;
        engine->guc_id = GUC_BLITTER_ENGINE;
        engine->mmio_base = BLT_RING_BASE;

        logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
        logical_ring_default_vfuncs(dev, engine);

        return logical_ring_init(dev, engine);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine = &dev_priv->engine[VECS];

        engine->name = "video enhancement ring";
        engine->id = VECS;
        engine->exec_id = I915_EXEC_VEBOX;
        engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
        engine->mmio_base = VEBOX_RING_BASE;

        logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
        logical_ring_default_vfuncs(dev, engine);

        return logical_ring_init(dev, engine);
}
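
/*
 * All the per-engine init functions above follow the same recipe: fill in
 * the engine identity fields, pick the interrupt shift, install the default
 * Execlists vfuncs and hand over to logical_ring_init(). A minimal sketch
 * of what a further engine would look like, assuming purely hypothetical
 * VCS3 / GUC_VIDEO_ENGINE3 / GEN8_BSD3_* names that do not exist in this
 * driver:
 *
 *	static int logical_bsd3_ring_init(struct drm_device *dev)
 *	{
 *		struct drm_i915_private *dev_priv = dev->dev_private;
 *		struct intel_engine_cs *engine = &dev_priv->engine[VCS3];
 *
 *		engine->name = "bsd3 ring";
 *		engine->id = VCS3;
 *		engine->exec_id = I915_EXEC_BSD;
 *		engine->guc_id = GUC_VIDEO_ENGINE3;
 *		engine->mmio_base = GEN8_BSD3_RING_BASE;
 *
 *		logical_ring_default_irqs(engine, GEN8_VCS3_IRQ_SHIFT);
 *		logical_ring_default_vfuncs(dev, engine);
 *
 *		return logical_ring_init(dev, engine);
 *	}
 */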

/**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * This function initializes the engines for Execlists submission (the equivalent
 * in the legacy ringbuffer submission world would be i915_gem_init_engines),
 * and does so only for those engines that are actually present in the hardware.
 *
 * Return: non-zero if the initialization failed.
 */
int intel_logical_rings_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = logical_render_ring_init(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = logical_bsd_ring_init(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (HAS_BLT(dev)) {
                ret = logical_blt_ring_init(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        if (HAS_VEBOX(dev)) {
                ret = logical_vebox_ring_init(dev);
                if (ret)
                        goto cleanup_blt_ring;
        }

        if (HAS_BSD2(dev)) {
                ret = logical_bsd2_ring_init(dev);
                if (ret)
                        goto cleanup_vebox_ring;
        }

        return 0;

cleanup_vebox_ring:
        intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
cleanup_blt_ring:
        intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
        intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
cleanup_render_ring:
        intel_logical_ring_cleanup(&dev_priv->engine[RCS]);

        return ret;
}
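
/*
 * Note: in the Execlists path this function is believed to be plugged into
 * the dev_priv->gt.init_rings hook, so the common GEM init code invokes it
 * in place of the legacy i915_gem_init_rings().
 */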

static u32
make_rpcs(struct drm_device *dev)
{
        u32 rpcs = 0;

        /*
         * No explicit RPCS request is needed to ensure full
         * slice/subslice/EU enablement prior to Gen9.
         */
        if (INTEL_INFO(dev)->gen < 9)
                return 0;

        /*
         * Starting in Gen9, render power gating can leave
         * slice/subslice/EU in a partially enabled state. We
         * must make an explicit request through RPCS for full
         * enablement.
         */
        if (INTEL_INFO(dev)->has_slice_pg) {
                rpcs |= GEN8_RPCS_S_CNT_ENABLE;
                rpcs |= INTEL_INFO(dev)->slice_total <<
                        GEN8_RPCS_S_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }

        if (INTEL_INFO(dev)->has_subslice_pg) {
                rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
                rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
                        GEN8_RPCS_SS_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }

        if (INTEL_INFO(dev)->has_eu_pg) {
                rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
                        GEN8_RPCS_EU_MIN_SHIFT;
                rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
                        GEN8_RPCS_EU_MAX_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }

        return rpcs;
}
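
/*
 * Worked example with illustrative numbers (not any particular SKU): on a
 * Gen9 part reporting slice_total == 1, subslice_per_slice == 3 and
 * eu_per_subslice == 8, with all three power-gating flags set, the value
 * built above is:
 *
 *	GEN8_RPCS_ENABLE |
 *	GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT) |
 *	GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	(8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 *
 * i.e. an explicit request for everything the hardware reports as present.
 */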

static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
        u32 indirect_ctx_offset;

        switch (INTEL_INFO(engine->dev)->gen) {
        default:
                MISSING_CASE(INTEL_INFO(engine->dev)->gen);
                /* fall through */
        case 9:
                indirect_ctx_offset =
                        GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
                break;
        case 8:
                indirect_ctx_offset =
                        GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
                break;
        }

        return indirect_ctx_offset;
}
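
/*
 * The default returned here is expressed in cachelines: populate_lr_context()
 * shifts it left by 6 (multiplying by 64 bytes) before writing it into the
 * CTX_RCS_INDIRECT_CTX_OFFSET slot of the context image.
 */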

static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
                    struct intel_ringbuffer *ringbuf)
{
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct page *page;
        uint32_t *reg_state;
        int ret;

        if (!ppgtt)
                ppgtt = dev_priv->mm.aliasing_ppgtt;

        ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
                return ret;
        }

        ret = i915_gem_object_get_pages(ctx_obj);
        if (ret) {
                DRM_DEBUG_DRIVER("Could not get object pages\n");
                return ret;
        }

        i915_gem_object_pin_pages(ctx_obj);

        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
        page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
        reg_state = kmap_atomic(page);

        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
         * commands followed by (reg, value) pairs. The values we are setting here are
         * only for the first context restore: on a subsequent save, the GPU will
         * recreate this batchbuffer with new values (including all the missing
         * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
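        /*
         * Rough shape of what we emit below (dword offsets are the CTX_*
         * indices used with ASSIGN_CTX_REG; values shown are the
         * first-restore placeholders):
         *
         *	MI_LOAD_REGISTER_IMM(14 or 11) | MI_LRI_FORCE_POSTED
         *	(RING_CONTEXT_CONTROL, inhibit flags)
         *	(RING_HEAD, 0) (RING_TAIL, 0) (RING_START, 0) ...
         *	MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED
         *	(RING_CTX_TIMESTAMP, 0) (PDP3..PDP0 descriptors)
         *	MI_LOAD_REGISTER_IMM(1)            <- render engine only
         *	(GEN8_R_PWR_CLK_STATE, make_rpcs())
         */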
        reg_state[CTX_LRI_HEADER_0] =
                MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
        ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
                       RING_CONTEXT_CONTROL(engine),
                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                          (HAS_RESOURCE_STREAMER(dev) ?
                                           CTX_CTRL_RS_CTX_ENABLE : 0)));
        ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 0);
        /* Ring buffer start address is not known until the buffer is pinned.
         * It is written to the context image in execlists_update_context()
         */
        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
                       RING_START(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
                       RING_CTL(engine->mmio_base),
                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
                       RING_BBADDR_UDW(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
                       RING_BBADDR(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
                       RING_BBSTATE(engine->mmio_base),
                       RING_BB_PPGTT);
        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
                       RING_SBBADDR_UDW(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
                       RING_SBBADDR(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
                       RING_SBBSTATE(engine->mmio_base), 0);
        if (engine->id == RCS) {
                ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
                               RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
                ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
                               RING_INDIRECT_CTX(engine->mmio_base), 0);
                ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
                               RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
                if (engine->wa_ctx.obj) {
                        struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
                        uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

                        reg_state[CTX_RCS_INDIRECT_CTX+1] =
                                (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
                                (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

                        reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
                                intel_lr_indirect_ctx_offset(engine) << 6;

                        reg_state[CTX_BB_PER_CTX_PTR+1] =
                                (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
                                0x01;
                }
        }
        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
        ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
                       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
        /* PDP values will be assigned later if needed */
        ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
        ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);

        if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
                 */
                ASSIGN_CTX_PML4(ppgtt, reg_state);
        } else {
                /* 32b PPGTT
                 * PDP*_DESCRIPTOR contains the base address of space supported.
                 * With dynamic page allocation, PDPs may not be allocated at
                 * this point. Point the unallocated PDPs to the scratch page
                 */
                execlists_update_context_pdps(ppgtt, reg_state);
        }

        if (engine->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
                               make_rpcs(dev));
        }

        kunmap_atomic(reg_state);
        i915_gem_object_unpin_pages(ctx_obj);

        return 0;
}

/**
 * intel_lr_context_free() - free the LRC specific bits of a context
 * @ctx: the LR context to free.
 *
 * The real context freeing is done in i915_gem_context_free: this only
 * takes care of the bits that are LRC related: the per-engine backing
 * objects and the logical ringbuffer.
 */
void intel_lr_context_free(struct intel_context *ctx)
{
        int i;

        for (i = I915_NUM_ENGINES; --i >= 0; ) {
                struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;

                if (!ctx_obj)
                        continue;

                if (ctx == ctx->i915->kernel_context) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                }

                WARN_ON(ctx->engine[i].pin_count);
                intel_ringbuffer_free(ringbuf);
                drm_gem_object_unreference(&ctx_obj->base);
        }
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
        int ret = 0;

        WARN_ON(INTEL_INFO(engine->dev)->gen < 8);

        switch (engine->id) {
        case RCS:
                if (INTEL_INFO(engine->dev)->gen >= 9)
                        ret = GEN9_LR_CONTEXT_RENDER_SIZE;
                else
                        ret = GEN8_LR_CONTEXT_RENDER_SIZE;
                break;
        case VCS:
        case BCS:
        case VECS:
        case VCS2:
                ret = GEN8_LR_CONTEXT_OTHER_SIZE;
                break;
        }

        return ret;
}
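
/*
 * A typical caller rounds the reported size up to the GGTT allocation
 * granularity before allocating the backing object, as
 * intel_lr_context_deferred_alloc() does below:
 *
 *	context_size = round_up(intel_lr_context_size(engine), 4096);
 *	ctx_obj = i915_gem_alloc_object(dev, context_size);
 */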

static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
                                           struct drm_i915_gem_object *default_ctx_obj)
{
        struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct page *page;

        /* The HWSP is part of the default context object in LRC mode. */
        engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
                        + LRC_PPHWSP_PN * PAGE_SIZE;
        page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
        engine->status_page.page_addr = kmap(page);
        engine->status_page.obj = default_ctx_obj;

        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   (u32)engine->status_page.gfx_addr);
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}

/**
 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @engine: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we plan
 * to use the context with them. The context backing objects and the ringbuffers
 * (especially the ringbuffer backing objects) use up a lot of memory, which is
 * why the creation is deferred: it is better to make sure first that we actually
 * need to use the context with a given engine.
 *
 * Return: non-zero on error.
 */
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                    struct intel_engine_cs *engine)
{
        struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
        int ret;

        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
        WARN_ON(ctx->engine[engine->id].state);

        context_size = round_up(intel_lr_context_size(engine), 4096);

        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;

        ctx_obj = i915_gem_alloc_object(dev, context_size);
        if (!ctx_obj) {
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
                return -ENOMEM;
        }

        ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error_deref_obj;
        }

        ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                goto error_ringbuf;
        }

        ctx->engine[engine->id].ringbuf = ringbuf;
        ctx->engine[engine->id].state = ctx_obj;

        if (ctx != ctx->i915->kernel_context && engine->init_context) {
                struct drm_i915_gem_request *req;

                req = i915_gem_request_alloc(engine, ctx);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        DRM_ERROR("ring create req: %d\n", ret);
                        goto error_ringbuf;
                }

                ret = engine->init_context(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n", ret);
                        i915_gem_request_cancel(req);
                        goto error_ringbuf;
                }
                i915_add_request_no_flush(req);
        }
        return 0;

error_ringbuf:
        intel_ringbuffer_free(ringbuf);
error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
        ctx->engine[engine->id].ringbuf = NULL;
        ctx->engine[engine->id].state = NULL;
        return ret;
}

void intel_lr_context_reset(struct drm_device *dev,
                            struct intel_context *ctx)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int i;

        for_each_engine(engine, dev_priv, i) {
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[engine->id].state;
                struct intel_ringbuffer *ringbuf =
                                ctx->engine[engine->id].ringbuf;
                uint32_t *reg_state;
                struct page *page;

                if (!ctx_obj)
                        continue;

                if (i915_gem_object_get_pages(ctx_obj)) {
                        WARN(1, "Failed get_pages for context obj\n");
                        continue;
                }
                page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
                reg_state = kmap_atomic(page);

                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;

                kunmap_atomic(reg_state);

                ringbuf->head = 0;
                ringbuf->tail = 0;
        }
}