/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Chanbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	int i;

	for (i = 0; i < 8; i++)
		ring_context->pdps[i].val = pdp[7 - i];
}

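/*
 * Propagate the root pointers of the shadowed PPGTT into the shadow ring
 * context, so the request that is about to be submitted walks GVT's shadow
 * page tables instead of the guest's own tables.
 */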
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;

	if (WARN_ON(!workload->shadow_mm))
		return;

	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
		return;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);
	set_context_pdp_root_pointer(shadow_ring_context,
			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
	kunmap(page);
}

/*
 * When populating the shadow ctx from the guest, we should not override
 * OA-related registers, so that they will not be overwritten by guest OA
 * configs. This makes it possible to capture OA data from the host for both
 * the host and guests.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
		u32 *reg_state, bool save)
{
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	int i = 0;
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};

	if (workload->ring_id != RCS0)
		return;

	if (save) {
		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;

			workload->flex_mmio[i] = reg_state[state_offset + 1];
		}
	} else {
		reg_state[ctx_oactxctrl] =
			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;

		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
			u32 state_offset = ctx_flexeu0 + i * 2;
			u32 mmio = flex_mmio[i];

			reg_state[state_offset] = mmio;
			reg_state[state_offset + 1] = workload->flex_mmio[i];
		}
	}
}

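/*
 * Copy the guest ring context (and, when the context is not
 * restore-inhibited, the remaining guest context pages) from guest memory
 * into the shadow context object, masking ctx_ctrl and preserving the host
 * OA state via sr_oa_regs().
 */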
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = workload->ring_id;
	struct drm_i915_gem_object *ctx_obj =
		workload->req->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *dst;
	unsigned long context_gpa, context_page_num;
	int i;

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
					      + RING_CTX_OFF(name.val),\
					      &shadow_ring_context->name.val, 4);\
		shadow_ring_context->name.val |= 0xffff << 16;\
	}

	COPY_REG_MASKED(ctx_ctrl);
	COPY_REG(ctx_timestamp);

	if (ring_id == RCS0) {
		COPY_REG(bb_per_ctx_ptr);
		COPY_REG(rcs_indirect_ctx);
		COPY_REG(rcs_indirect_ctx_offset);
	}
#undef COPY_REG
#undef COPY_REG_MASKED

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
	kunmap(page);

	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
		return 0;

	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
			workload->ctx_desc.lrca);

	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
		context_page_num = 19;

	i = 2;
	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
				I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("Invalid guest context descriptor\n");
			return -EFAULT;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		dst = kmap(page);
		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}
	return 0;
}

static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->gem_context);
}

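/*
 * Snapshot a few hang-diagnosis registers (INSTDONE, ACTHD) of the engine
 * into the vGPU's virtual register file, so the guest can inspect them after
 * its context is scheduled out or preempted.
 */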
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
	i915_reg_t reg;

	reg = RING_INSTDONE(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
	reg = RING_ACTHD_UDW(ring_base);
	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}

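/*
 * i915 context status notifier: on schedule-in/out of a GVT request, switch
 * the engine MMIO state between the host and the owning vGPU and track
 * whether the shadow context is currently active on hardware.
 */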
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id ring_id = req->engine->id;
	struct intel_vgpu_workload *workload;
	unsigned long flags;

	if (!is_gvt_request(req)) {
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
		    scheduler->engine_owner[ring_id]) {
			/* Switch ring from vGPU to host. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);

		return NOTIFY_OK;
	}

	workload = scheduler->current_workload[ring_id];
	if (unlikely(!workload))
		return NOTIFY_OK;

	switch (action) {
	case INTEL_CONTEXT_SCHEDULE_IN:
		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
			/* Switch ring from host to vGPU or vGPU to vGPU. */
			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
					      workload->vgpu, ring_id);
			scheduler->engine_owner[ring_id] = workload->vgpu;
		} else
			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
				      ring_id, workload->vgpu->id);
		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
		atomic_set(&workload->shadow_ctx_active, 1);
		break;
	case INTEL_CONTEXT_SCHEDULE_OUT:
		save_ring_hw_state(workload->vgpu, ring_id);
		atomic_set(&workload->shadow_ctx_active, 0);
		break;
	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
		save_ring_hw_state(workload->vgpu, ring_id);
		break;
	default:
		WARN_ON(1);
		return NOTIFY_OK;
	}
	wake_up(&workload->shadow_ctx_status_wq);
	return NOTIFY_OK;
}

static void shadow_context_descriptor_update(struct intel_context *ce)
{
	u64 desc = 0;

	desc = ce->lrc_desc;

	/* Update bits 0-11 of the context descriptor which includes flags
	 * like GEN8_CTX_* cached in desc_template
	 */
	desc &= U64_MAX << 12;
	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

	ce->lrc_desc = desc;
}

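/*
 * Emit the scanned guest ring-buffer contents into the shadow request's
 * ring, so i915 executes the shadowed commands on behalf of the guest.
 */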
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct i915_request *req = workload->req;
	void *shadow_ring_buffer_va;
	u32 *cs;

	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
		|| IS_COFFEELAKE(req->i915))
		&& is_inhibit_context(req->hw_context))
		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs)) {
		gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
			workload->rb_len);
		return PTR_ERR(cs);
	}

	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	memcpy(cs, shadow_ring_buffer_va,
			workload->rb_len);

	cs += workload->rb_len / sizeof(u32);
	intel_ring_advance(workload->req, cs);

	return 0;
}

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	if (!wa_ctx->indirect_ctx.obj)
		return;

	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
	i915_gem_object_put(wa_ctx->indirect_ctx.obj);

	wa_ctx->indirect_ctx.obj = NULL;
	wa_ctx->indirect_ctx.shadow_va = NULL;
}

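/*
 * Point the shadow context's i915 PPGTT root entries at the shadowed guest
 * page tables before the request is submitted.
 */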
static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
					 struct i915_gem_context *ctx)
{
	struct intel_vgpu_mm *mm = workload->shadow_mm;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	int i = 0;

	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
		return -EINVAL;

	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
	} else {
		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
			px_dma(ppgtt->pdp.page_directory[i]) =
				mm->ppgtt_mm.shadow_pdps[i];
		}
	}

	return 0;
}

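/*
 * Lazily allocate the i915 request that will carry this workload; if the
 * workload already has a request, reuse it.
 */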
static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct i915_request *rq;
	int ret = 0;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->req)
		goto out;

	rq = i915_request_alloc(engine, shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
		goto out;
	}
	workload->req = i915_request_get(rq);
out:
	return ret;
}

/**
 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
 * shadowing it as well, including the ring buffer, wa_ctx and ctx.
 * @workload: an abstract entity for each execlist submission.
 *
 * This function is called before the workload is submitted to i915, to make
 * sure the content of the workload is valid.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (workload->shadow)
		return 0;

	/* Pin the shadow context by GVT even though it will be pinned again
	 * when i915 allocates the request. That is because GVT updates the
	 * guest context from the shadow context when the workload is
	 * completed, and at that moment i915 may have already unpinned the
	 * shadow context, making the shadow_ctx pages invalid. So GVT needs
	 * to pin it itself. After updating the guest context, GVT can unpin
	 * the shadow_ctx safely.
	 */
	ce = intel_context_pin(shadow_ctx, engine);
	if (IS_ERR(ce)) {
		gvt_vgpu_err("fail to pin shadow context\n");
		return PTR_ERR(ce);
	}

	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
		shadow_context_descriptor_update(ce);

	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
	if (ret)
		goto err_unpin;

	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
		if (ret)
			goto err_shadow;
	}

	workload->shadow = true;
	return 0;
err_shadow:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
	intel_context_unpin(ce);
	return ret;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);

static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_gvt *gvt = workload->vgpu->gvt;
	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		/* For a privileged batch buffer that is not wa_ctx,
		 * bb_start_cmd_va is only updated into ring_scan_buffer, not
		 * the real ring address allocated later in
		 * copy_workload_to_ring_buffer. Note that
		 * shadow_ring_buffer_va is pointed at the real ring buffer va
		 * by copy_workload_to_ring_buffer.
		 */

		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (bb->ppgtt) {
			/* For a non-privileged bb, scan & shadow is only for
			 * debugging purposes, so the content of the shadow bb
			 * is the same as the original bb. Therefore, rather
			 * than switching to the shadow bb's gma address, we
			 * directly use the original batch buffer's gma
			 * address and send the original bb to hardware.
			 */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}
			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

		} else {
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
					NULL, 0, 0, 0);
			if (IS_ERR(bb->vma)) {
				ret = PTR_ERR(bb->vma);
				goto err;
			}

			/* relocate shadow batch buffer */
			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			/* No one is going to touch shadow bb from now on. */
			if (bb->clflush & CLFLUSH_AFTER) {
				drm_clflush_virt_range(bb->va,
						bb->obj->base.size);
				bb->clflush &= ~CLFLUSH_AFTER;
			}

			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
					false);
			if (ret)
				goto err;

			i915_gem_obj_finish_shmem_access(bb->obj);
			bb->accessing = false;

			ret = i915_vma_move_to_active(bb->vma,
						      workload->req,
						      0);
			if (ret)
				goto err;
		}
	}
	return 0;
err:
	release_shadow_batch_buffer(workload);
	return ret;
}

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct intel_vgpu_workload *workload =
		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	struct i915_request *rq = workload->req;
	struct execlist_ring_context *shadow_ring_context =
		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

	shadow_ring_context->bb_per_ctx_ptr.val =
		(shadow_ring_context->bb_per_ctx_ptr.val &
		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
	shadow_ring_context->rcs_indirect_ctx.val =
		(shadow_ring_context->rcs_indirect_ctx.val &
		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	struct i915_vma *vma;
	unsigned char *per_ctx_va =
		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
				       0, CACHELINE_BYTES, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* FIXME: we are not tracking our pinned VMA leaving it
	 * up to the core to fix up the stray pin_count upon
	 * free.
	 */

	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);

	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
	memset(per_ctx_va, 0, CACHELINE_BYTES);

	update_wa_ctx_2_shadow_ctx(wa_ctx);
	return 0;
}

static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_shadow_bb *bb, *pos;

	if (list_empty(&workload->shadow_bb))
		return;

	bb = list_first_entry(&workload->shadow_bb,
			struct intel_vgpu_shadow_bb, list);

	mutex_lock(&dev_priv->drm.struct_mutex);

	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
		if (bb->obj) {
			if (bb->accessing)
				i915_gem_obj_finish_shmem_access(bb->obj);

			if (bb->va && !IS_ERR(bb->va))
				i915_gem_object_unpin_map(bb->obj);

			if (bb->vma && !IS_ERR(bb->vma)) {
				i915_vma_unpin(bb->vma);
				i915_vma_close(bb->vma);
			}
			__i915_gem_object_release_unless_active(bb->obj);
		}
		list_del(&bb->list);
		kfree(bb);
	}

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

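/*
 * Prepare everything the request needs before submission: pin the shadow mm,
 * update the shadow PDPs, sync OOS pages, flush post-shadow writes, copy the
 * ring buffer and set up the shadow batch buffers and wa_ctx, unwinding in
 * reverse order on failure.
 */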
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ret = 0;

	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret) {
		gvt_vgpu_err("fail to vgpu pin mm\n");
		return ret;
	}

	update_shadow_pdps(workload);

	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to vgpu sync oos pages\n");
		goto err_unpin_mm;
	}

	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
	if (ret) {
		gvt_vgpu_err("fail to flush post shadow\n");
		goto err_unpin_mm;
	}

	ret = copy_workload_to_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to generate request\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_batch_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
		goto err_unpin_mm;
	}

	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
		goto err_shadow_batch;
	}

	if (workload->prepare) {
		ret = workload->prepare(workload);
		if (ret)
			goto err_shadow_wa_ctx;
	}

	return 0;
err_shadow_wa_ctx:
	release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
	release_shadow_batch_buffer(workload);
err_unpin_mm:
	intel_vgpu_unpin_mm(workload->shadow_mm);
	return ret;
}

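/*
 * Dispatch a workload to i915: set up the shadow PPGTT, allocate the
 * request, scan/shadow and populate the context, prepare the backing
 * buffers and, if everything succeeded, add the request for execution.
 */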
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	struct i915_request *rq;
	int ring_id = workload->ring_id;
	int ret;

	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
		ring_id, workload);

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&dev_priv->drm.struct_mutex);

	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
	if (ret < 0) {
		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
		goto err_req;
	}

	ret = intel_gvt_workload_req_alloc(workload);
	if (ret)
		goto err_req;

	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		goto out;

	ret = populate_shadow_context(workload);
	if (ret) {
		release_shadow_wa_ctx(&workload->wa_ctx);
		goto out;
	}

	ret = prepare_workload(workload);
out:
	if (ret) {
		/* We might still need to add a request with a clean ctx
		 * to retire it properly.
		 */
		rq = fetch_and_zero(&workload->req);
		i915_request_put(rq);
	}

	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
				ring_id, workload->req);
		i915_request_add(workload->req);
		workload->dispatched = true;
	}
err_req:
	if (ret)
		workload->status = ret;
	mutex_unlock(&dev_priv->drm.struct_mutex);
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
		struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;

	mutex_lock(&gvt->sched_lock);

	/*
	 * no current vgpu / will be scheduled out / no workload
	 * bail out
	 */
	if (!scheduler->current_vgpu) {
		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
		goto out;
	}

	if (scheduler->need_reschedule) {
		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
		goto out;
	}

	if (!scheduler->current_vgpu->active ||
	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
		goto out;

	/*
	 * still have a current workload; maybe the workload dispatcher
	 * failed to submit it for some reason, so resubmit it.
	 */
	if (scheduler->current_workload[ring_id]) {
		workload = scheduler->current_workload[ring_id];
		gvt_dbg_sched("ring id %d still have current workload %p\n",
				ring_id, workload);
		goto out;
	}

	/*
	 * pick a workload as the current workload.
	 * once the current workload is set, the scheduling policy routines
	 * will wait until it is finished when trying to schedule out a vgpu.
	 */
	scheduler->current_workload[ring_id] = container_of(
			workload_q_head(scheduler->current_vgpu, ring_id)->next,
			struct intel_vgpu_workload, list);

	workload = scheduler->current_workload[ring_id];

	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

	atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
	mutex_unlock(&gvt->sched_lock);
	return workload;
}

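/*
 * Write the shadow context image back into the guest's context pages after
 * the request has completed, so the guest sees the updated ring state.
 */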
static void update_guest_context(struct intel_vgpu_workload *workload)
{
	struct i915_request *rq = workload->req;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
	struct execlist_ring_context *shadow_ring_context;
	struct page *page;
	void *src;
	unsigned long context_gpa, context_page_num;
	int i;

	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
		      workload->ctx_desc.lrca);

	context_page_num = rq->engine->context_size;
	context_page_num = context_page_num >> PAGE_SHIFT;

	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
		context_page_num = 19;

	i = 2;

	while (i < context_page_num) {
		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
				(u32)((workload->ctx_desc.lrca + i) <<
					I915_GTT_PAGE_SHIFT));
		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid guest context descriptor\n");
			return;
		}

		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
		src = kmap(page);
		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
				I915_GTT_PAGE_SIZE);
		kunmap(page);
		i++;
	}

	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
	shadow_ring_context = kmap(page);

#define COPY_REG(name) \
	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

	COPY_REG(ctx_ctrl);
	COPY_REG(ctx_timestamp);

#undef COPY_REG

	intel_gvt_hypervisor_write_gpa(vgpu,
			workload->ring_context_gpa +
			sizeof(*shadow_ring_context),
			(void *)shadow_ring_context +
			sizeof(*shadow_ring_context),
			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

	kunmap(page);
}

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine;
	struct intel_vgpu_workload *pos, *n;
	intel_engine_mask_t tmp;

	/* free the unsubmitted workloads in the queues. */
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
		list_for_each_entry_safe(pos, n,
			&s->workload_q_head[engine->id], list) {
			list_del_init(&pos->list);
			intel_vgpu_destroy_workload(pos);
		}
		clear_bit(engine->id, s->shadow_ctx_desc_updated);
	}
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload =
		scheduler->current_workload[ring_id];
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_request *rq = workload->req;
	int event;

	mutex_lock(&vgpu->vgpu_lock);
	mutex_lock(&gvt->sched_lock);

	/* For a workload with a request, wait for the context switch to make
	 * sure the request is completed.
	 * For a workload without a request, complete the workload directly.
	 */
	if (rq) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

		/* If this request caused a GPU hang, req->fence.error will
		 * be set to -EIO. Use -EIO as the workload status so that,
		 * when this request caused a GPU hang, we don't trigger a
		 * context switch interrupt to the guest.
		 */
		if (likely(workload->status == -EINPROGRESS)) {
			if (workload->req->fence.error == -EIO)
				workload->status = -EIO;
			else
				workload->status = 0;
		}

		if (!workload->status &&
		    !(vgpu->resetting_eng & BIT(ring_id))) {
			update_guest_context(workload);

			for_each_set_bit(event, workload->pending_events,
					 INTEL_GVT_EVENT_MAX)
				intel_vgpu_trigger_virtual_event(vgpu, event);
		}

		/* unpin shadow ctx as the shadow_ctx update is done */
		mutex_lock(&rq->i915->drm.struct_mutex);
		intel_context_unpin(rq->hw_context);
		mutex_unlock(&rq->i915->drm.struct_mutex);

		i915_request_put(fetch_and_zero(&workload->req));
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
			ring_id, workload, workload->status);

	scheduler->current_workload[ring_id] = NULL;

	list_del_init(&workload->list);

	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* A non-zero workload->status means the HW GPU hit a hang or
		 * something went wrong with i915/GVT, and GVT won't inject a
		 * context switch interrupt to the guest. So to the guest
		 * this error is effectively a vGPU hang, and we should
		 * emulate a vGPU hang accordingly. If there are pending
		 * workloads already submitted from the guest, we should
		 * clean them up like the HW GPU does.
		 *
		 * If we are in the middle of an engine reset, the pending
		 * workloads won't be submitted to the HW GPU and will be
		 * cleaned up later during the reset, so doing the workload
		 * cleanup here doesn't have any impact.
		 */
		intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
	}

	workload->complete(workload);

	atomic_dec(&s->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);

	if (gvt->scheduler.need_reschedule)
		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);
}

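/*
 * Per-engine scheduler thread: picks the next workload for its ring,
 * dispatches it to i915, waits for the request to finish and then completes
 * the workload back to the vGPU.
 */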
struct workload_thread_param {
	struct intel_gvt *gvt;
	int ring_id;
};

static int workload_thread(void *priv)
{
	struct workload_thread_param *p = (struct workload_thread_param *)priv;
	struct intel_gvt *gvt = p->gvt;
	int ring_id = p->ring_id;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload = NULL;
	struct intel_vgpu *vgpu = NULL;
	int ret;
	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	kfree(p);

	gvt_dbg_core("workload thread for ring %d started\n", ring_id);

	while (!kthread_should_stop()) {
		add_wait_queue(&scheduler->waitq[ring_id], &wait);
		do {
			workload = pick_next_workload(gvt, ring_id);
			if (workload)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&scheduler->waitq[ring_id], &wait);

		if (!workload)
			break;

		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
				workload->ring_id, workload,
				workload->vgpu->id);

		intel_runtime_pm_get(gvt->dev_priv);

		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
				workload->ring_id, workload);

		if (need_force_wake)
			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
					FORCEWAKE_ALL);

		ret = dispatch_workload(workload);

		if (ret) {
			vgpu = workload->vgpu;
			gvt_vgpu_err("fail to dispatch workload, skip\n");
			goto complete;
		}

		gvt_dbg_sched("ring id %d wait workload %p\n",
				workload->ring_id, workload);
		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
				workload, workload->status);

		complete_current_workload(gvt, ring_id);

		if (need_force_wake)
			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
					FORCEWAKE_ALL);

		intel_runtime_pm_put_unchecked(gvt->dev_priv);
		if (ret && (vgpu_is_vm_unhealthy(ret)))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
	}
	return 0;
}

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	if (atomic_read(&s->running_workload_num)) {
		gvt_dbg_sched("wait vgpu idle\n");

		wait_event(scheduler->workload_complete_wq,
				!atomic_read(&s->running_workload_num));
	}
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	gvt_dbg_core("clean workload scheduler\n");

	for_each_engine(engine, gvt->dev_priv, i) {
		atomic_notifier_chain_unregister(
					&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
		kthread_stop(scheduler->thread[i]);
	}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct workload_thread_param *param = NULL;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;
	int ret;

	gvt_dbg_core("init workload scheduler\n");

	init_waitqueue_head(&scheduler->workload_complete_wq);

	for_each_engine(engine, gvt->dev_priv, i) {
		init_waitqueue_head(&scheduler->waitq[i]);

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		param->gvt = gvt;
		param->ring_id = i;

		scheduler->thread[i] = kthread_run(workload_thread, param,
			"gvt workload %d", i);
		if (IS_ERR(scheduler->thread[i])) {
			gvt_err("fail to create workload thread\n");
			ret = PTR_ERR(scheduler->thread[i]);
			goto err;
		}

		gvt->shadow_ctx_notifier_block[i].notifier_call =
					shadow_context_status_change;
		atomic_notifier_chain_register(&engine->context_status_notifier,
					&gvt->shadow_ctx_notifier_block[i]);
	}
	return 0;
err:
	intel_gvt_clean_workload_scheduler(gvt);
	kfree(param);
	param = NULL;
	return ret;
}

static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
{
	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
	int i;

	if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++)
			px_dma(i915_ppgtt->pdp.page_directory[i]) =
				s->i915_context_pdps[i];
	}
}

/**
 * intel_vgpu_clean_submission - free submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being destroyed.
 *
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
	i915_context_ppgtt_root_restore(s);
	i915_gem_context_put(s->shadow_ctx);
	kmem_cache_destroy(s->workloads);
}

/**
 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
 * @vgpu: a vGPU
 * @engine_mask: engines expected to be reset
 *
 * This function is called when a vGPU is being reset.
 *
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	if (!s->active)
		return;

	intel_vgpu_clean_workloads(vgpu, engine_mask);
	s->ops->reset(vgpu, engine_mask);
}

static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
{
	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
	int i;

	if (i915_vm_is_4lvl(&i915_ppgtt->vm))
		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
	else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++)
			s->i915_context_pdps[i] =
				px_dma(i915_ppgtt->pdp.page_directory[i]);
	}
}

/**
 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
 * @vgpu: a vGPU
 *
 * This function is called when a vGPU is being created.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	int ret;

	s->shadow_ctx = i915_gem_context_create_gvt(
			&vgpu->gvt->dev_priv->drm);
	if (IS_ERR(s->shadow_ctx))
		return PTR_ERR(s->shadow_ctx);

	i915_context_ppgtt_root_save(s);

	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);

	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
						  sizeof(struct intel_vgpu_workload), 0,
						  SLAB_HWCACHE_ALIGN,
						  offsetof(struct intel_vgpu_workload, rb_tail),
						  sizeof_field(struct intel_vgpu_workload, rb_tail),
						  NULL);

	if (!s->workloads) {
		ret = -ENOMEM;
		goto out_shadow_ctx;
	}

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		INIT_LIST_HEAD(&s->workload_q_head[i]);

	atomic_set(&s->running_workload_num, 0);
	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

	return 0;

out_shadow_ctx:
	i915_gem_context_put(s->shadow_ctx);
	return ret;
}

/**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
 * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when the guest configures the submission interface.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	const struct intel_vgpu_submission_ops *ops[] = {
		[INTEL_VGPU_EXECLIST_SUBMISSION] =
			&intel_vgpu_execlist_submission_ops,
	};
	int ret;

	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
		return -EINVAL;

	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
		return -EINVAL;

	if (s->active)
		s->ops->clean(vgpu, engine_mask);

	if (interface == 0) {
		s->ops = NULL;
		s->virtual_submission_interface = 0;
		s->active = false;
		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
		return 0;
	}

	ret = ops[interface]->init(vgpu, engine_mask);
	if (ret)
		return ret;

	s->ops = ops[interface];
	s->virtual_submission_interface = interface;
	s->active = true;

	gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
			vgpu->id, s->ops->name);

	return 0;
}

/**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
 * @workload: workload to destroy
 *
 * This function is called when destroying a vGPU workload.
 *
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}

static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	INIT_LIST_HEAD(&workload->shadow_bb);

	init_waitqueue_head(&workload->shadow_ctx_status_wq);
	atomic_set(&workload->shadow_ctx_active, 0);

	workload->status = -EINPROGRESS;
	workload->vgpu = vgpu;

	return workload;
}

#define RING_CTX_OFF(x) \
	offsetof(struct execlist_ring_context, x)

static void read_guest_pdps(struct intel_vgpu *vgpu,
		u64 ring_context_gpa, u32 pdp[8])
{
	u64 gpa;
	int i;

	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);

	for (i = 0; i < 8; i++)
		intel_gvt_hypervisor_read_gpa(vgpu,
				gpa + i * 8, &pdp[7 - i], 4);
}

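/*
 * Resolve the guest PDPs from the ring context into a shadow PPGTT mm for
 * this workload, based on the addressing mode in the context descriptor.
 */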
static int prepare_mm(struct intel_vgpu_workload *workload)
{
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
	enum intel_gvt_gtt_type root_entry_type;
	u64 pdps[GVT_RING_CTX_NR_PDPS];

	switch (desc->addressing_mode) {
	case 1: /* legacy 32-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		break;
	case 3: /* legacy 64-bit */
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
		break;
	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	workload->shadow_mm = mm;
	return 0;
}

#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

#define get_last_workload(q) \
	(list_empty(q) ? NULL : container_of(q->prev, \
	struct intel_vgpu_workload, list))
/**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
 * @ring_id: ring index
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
 *
 * Returns:
 * struct intel_vgpu_workload * on success, negative error code in
 * pointer if failed.
 *
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct list_head *q = workload_q_head(vgpu, ring_id);
	struct intel_vgpu_workload *last_workload = get_last_workload(q);
	struct intel_vgpu_workload *workload = NULL;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u64 ring_context_gpa;
	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
	int ret;

	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
			(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
		return ERR_PTR(-EINVAL);
	}

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_header.val), &head, 4);

	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ring_tail.val), &tail, 4);

	head &= RB_HEAD_OFF_MASK;
	tail &= RB_TAIL_OFF_MASK;

	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
		gvt_dbg_el("ctx head %x real head %lx\n", head,
				last_workload->rb_tail);
		/*
		 * cannot use guest context head pointer here,
		 * as it might not be updated at this time
		 */
		head = last_workload->rb_tail;
	}

	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);

	/* record some ring buffer register values for scan and shadow */
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_start.val), &start, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);

	workload = alloc_workload(vgpu);
	if (IS_ERR(workload))
		return workload;

	workload->ring_id = ring_id;
	workload->ctx_desc = *desc;
	workload->ring_context_gpa = ring_context_gpa;
	workload->rb_head = head;
	workload->rb_tail = tail;
	workload->rb_start = start;
	workload->rb_ctl = ctl;

	if (ring_id == RCS0) {
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);

		workload->wa_ctx.indirect_ctx.guest_gma =
			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
		workload->wa_ctx.indirect_ctx.size =
			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
			CACHELINE_BYTES;
		workload->wa_ctx.per_ctx.guest_gma =
			per_ctx & PER_CTX_ADDR_MASK;
		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
	}

	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
			workload, ring_id, head, tail, start, ctl);

	ret = prepare_mm(workload);
	if (ret) {
		kmem_cache_free(s->workloads, workload);
		return ERR_PTR(ret);
	}

	/* Only scan and shadow the first workload in the queue
	 * as there is only one pre-allocated buf-obj for shadow.
	 */
	if (list_empty(workload_q_head(vgpu, ring_id))) {
		intel_runtime_pm_get(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		ret = intel_gvt_scan_and_shadow_workload(workload);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_put_unchecked(dev_priv);
	}

	if (ret) {
		if (vgpu_is_vm_unhealthy(ret))
			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}

/**
 * intel_vgpu_queue_workload - Queue a vGPU workload
 * @workload: the workload to queue in
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
	list_add_tail(&workload->list,
		workload_q_head(workload->vgpu, workload->ring_id));
	intel_gvt_kick_schedule(workload->vgpu->gvt);
	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}