/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

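/* Write the guest PPGTT page-directory pointers into the shadow ring
 * context. The PDP registers live in the context image as UDW/LDW MMIO
 * pairs starting at pdp3_UDW, so the eight 32-bit values are stored in
 * reverse order.
 */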
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

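/* Copy the guest ring context into the shadow context that will be
 * submitted to hardware: the remaining context pages are read from guest
 * memory via the guest GGTT, then selected registers, the shadow PDP root
 * pointers and the rest of the ring context page are filled in.
 */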
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

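/* Requests submitted on behalf of a vGPU use a context with force single
 * submission set; this is how GVT-owned requests are told apart from host
 * i915 requests in the context status notifier below.
 */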
static inline bool is_gvt_request(struct drm_i915_gem_request *req)
{
        return i915_gem_context_force_single_submission(req->ctx);
}

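/* Context status notifier for the shadow context: on schedule-in/out it
 * switches the render MMIO state between host and vGPU, tracks whether the
 * shadow context is active on hardware, and wakes up waiters in
 * complete_current_workload().
 */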
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];

        if (!is_gvt_request(req) || unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
                                           workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                              workload->ring_id);
                /* If the status is -EINPROGRESS, the workload did not hit
                 * any issue during dispatch, so clear the status to zero
                 * here on SCHEDULE_OUT. If the status is anything other
                 * than -EINPROGRESS, something went wrong during dispatch
                 * and the status must be left untouched.
                 */
                if (workload->status == -EINPROGRESS)
                        workload->status = 0;
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

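/* Dispatch a vGPU workload to i915: program the addressing mode into the
 * shadow context descriptor, pin the shadow context, allocate a request,
 * scan and shadow the ring buffer (plus the wa_ctx on RCS), populate the
 * shadow context from the guest and submit the request.
 */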
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                     GEN8_CTX_ADDRESSING_MODE_SHIFT;

        mutex_lock(&dev_priv->drm.struct_mutex);

        /* Pin the shadow context by GVT even though it will also be pinned
         * when i915 allocates the request. GVT updates the guest context
         * from the shadow context when the workload completes, and by that
         * time i915 may already have unpinned the shadow context, making
         * the shadow_ctx pages invalid. So GVT needs its own pin. Once the
         * guest context has been updated, GVT can unpin the shadow_ctx
         * safely.
         */
        ret = engine->context_pin(engine, shadow_ctx);
        if (ret) {
                gvt_vgpu_err("fail to pin shadow context\n");
                workload->status = ret;
                mutex_unlock(&dev_priv->drm.struct_mutex);
                return ret;
        }

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto out;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto out;
        }

        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                ring_id, workload->req);

        ret = 0;
        workload->dispatched = true;
out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(rq))
                i915_add_request(rq);
        else
                engine->context_unpin(engine, shadow_ctx);

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

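/* Pick the next workload for a ring under gvt->lock. Returns NULL when
 * there is no current vgpu, a reschedule is pending or the queue is empty;
 * otherwise the head of the current vgpu's queue becomes (or stays) the
 * ring's current workload.
 */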
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
                gvt_dbg_sched("ring id %d stop - no available workload\n",
                                ring_id);
                goto out;
        }

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, schedule policy routines
         * will wait until the current workload is finished when trying
         * to schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

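/* Write the shadow context back into the guest context image after the
 * workload completes: the context pages, the ring header (updated with the
 * workload's rb_tail) and selected registers are copied out to guest
 * memory.
 */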
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = gvt->dev_priv->engine[ring_id]->context_size;

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

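/* Finish the current workload on a ring: wait for the shadow context to be
 * scheduled out, copy results back to the guest (unless the workload failed
 * or the vGPU is resetting), trigger pending virtual events, unpin the
 * shadow context and hand the workload back through its complete() hook.
 */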
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        /* For a workload with a request, wait for the context switch to
         * make sure the request is completed.
         * For a workload without a request, complete the workload directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !vgpu->resetting) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, workload->vgpu->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

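/* Per-ring worker thread: sleeps until a workload is queued, takes runtime
 * PM (and forcewake on SKL/KBL), dispatches the workload under gvt->lock,
 * waits for its request to complete and then completes the workload.
 * scheduler_mutex serialises this across the per-ring threads.
 */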
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
                        || IS_KABYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                mutex_lock(&scheduler_mutex);

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);

                mutex_unlock(&scheduler_mutex);

        }
        return 0;
}

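/* Wait until every workload this vGPU has handed to the scheduler has
 * completed.
 */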
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}

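/* Tear down the workload scheduler: unregister the per-engine context
 * status notifiers and stop the per-ring workload threads.
 */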
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;

        gvt_dbg_core("clean workload scheduler\n");

        for_each_engine(engine, gvt->dev_priv, i) {
                atomic_notifier_chain_unregister(
                                        &engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
                kthread_stop(scheduler->thread[i]);
        }
}

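/* Set up the workload scheduler: spawn one workload thread per engine and
 * register a context status notifier on each engine so that schedule-in/out
 * events reach shadow_context_status_change().
 */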
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for_each_engine(engine, gvt->dev_priv, i) {
                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }

                gvt->shadow_ctx_notifier_block[i].notifier_call =
                                        shadow_context_status_change;
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

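/* Drop the vGPU's shadow context created by intel_vgpu_init_gvt_context(). */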
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

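/* Create the per-vGPU shadow GEM context used to submit shadowed guest
 * workloads. The RCS engine state is marked initialised up front, as the
 * context image is populated from the guest rather than by i915's default
 * initialisation.
 */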
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        return 0;
}