/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

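/*
 * RING_CTX_OFF(reg.val) yields the byte offset of a register slot within
 * the execlist ring context image, e.g. RING_CTX_OFF(ctx_timestamp.val),
 * so individual registers of the guest ring context can be addressed by
 * guest physical address.
 */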
#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

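/*
 * The ring context image keeps the four PPGTT root pointers as UDW/LDW
 * register pairs laid out from PDP3 down to PDP0, while the pdp[] array
 * passed in is ordered the other way round; hence the reversed copy.
 */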
static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}

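/*
 * Fill the shadow context image from the guest's ring context: bulk-copy
 * the trailing context pages, then rebuild the register state page with
 * the shadow PPGTT root pointers substituted for the guest's.
 */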
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

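        /*
         * Hardware quirk: the render context on Broadwell spans 19 pages,
         * so the size derived from the engine above is not used for it.
         */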
        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

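        /*
         * Page 0 (PPHWSP) is not copied from the guest and page 1 (the
         * register state page) is handled separately below, so the bulk
         * copy starts from page 2.
         */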
        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("Invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

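        /*
         * Copy selected register values from the guest's register state
         * page one at a time; the remainder of the page beyond struct
         * execlist_ring_context is copied over wholesale afterwards.
         */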
#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

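        /* Point the context at the shadow page tables, not the guest's. */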
        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}

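/*
 * Runs on i915's context status notifier chain. On SCHEDULE_IN the vGPU's
 * render MMIO state is loaded for the ring; on SCHEDULE_OUT the host state
 * is restored and the workload's status is finalized. Waiters on
 * shadow_ctx_status_wq are woken on every transition.
 */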
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                        struct intel_vgpu, shadow_ctx_notifier_block);
        struct drm_i915_gem_request *req =
                (struct drm_i915_gem_request *)data;
        struct intel_gvt_workload_scheduler *scheduler =
                &vgpu->gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];

        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
                                workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                workload->ring_id);
                /*
                 * A status of -EINPROGRESS means the workload hit no
                 * problem during dispatch, so once SCHEDULE_OUT arrives
                 * its status can be finalized to zero. Any other status
                 * means something went wrong during dispatch, and it
                 * must not be overwritten with zero.
                 */
                if (workload->status == -EINPROGRESS)
                        workload->status = 0;
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }
        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}

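/*
 * Dispatch a workload to i915: pin the shadow context, allocate a request
 * on the target engine, scan and shadow the guest ring buffer, populate
 * the shadow context and submit. Called with gvt->lock held.
 */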
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

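        /*
         * Propagate the guest's addressing mode into the shadow context
         * descriptor.
         */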
        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                GEN8_CTX_ADDRESSING_MODE_SHIFT;

        mutex_lock(&dev_priv->drm.struct_mutex);

        /*
         * Pin the shadow context here even though i915 also pins it when
         * allocating the request: gvt updates the guest context from the
         * shadow context after the workload completes, and by then i915
         * may already have unpinned the shadow context, invalidating the
         * shadow_ctx pages. Holding gvt's own pin keeps them valid; it is
         * dropped once the guest context has been updated.
         */
        ret = engine->context_pin(engine, shadow_ctx);
        if (ret) {
                gvt_vgpu_err("fail to pin shadow context\n");
                workload->status = ret;
                mutex_unlock(&dev_priv->drm.struct_mutex);
                return ret;
        }

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
                goto out;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto out;
        }

        ret = populate_shadow_context(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto out;
        }

        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                ring_id, workload->req);

        ret = 0;
        workload->dispatched = true;
out:
        if (ret)
                workload->status = ret;

        if (!IS_ERR_OR_NULL(rq))
                i915_add_request_no_flush(rq);
        else
                engine->context_unpin(engine, shadow_ctx);

        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
                gvt_dbg_sched("ring id %d stop - no available workload\n",
                                ring_id);
                goto out;
        }

        /*
         * Still have a current workload; the workload dispatcher may have
         * failed to submit it for some reason. Resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * Pick a workload as the current workload. Once it is set, the
         * schedule policy routines will wait for it to finish before
         * scheduling out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}

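/*
 * Mirror of populate_shadow_context: after the request completes, write
 * the shadow context image back into the guest's ring context. The guest
 * ring header is advanced to the submitted tail, so the guest observes
 * its commands as consumed.
 */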
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_vgpu_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}

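/*
 * Retire the current workload on a ring: wait until the shadow context has
 * been switched out, copy the results back to the guest, fire any pending
 * virtual events, then hand the workload back via its complete() callback.
 */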
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        struct intel_vgpu *vgpu;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];
        vgpu = workload->vgpu;

        /*
         * For a workload with a request, wait for the context switch to
         * make sure the request has completed. For a workload without a
         * request, complete it directly.
         */
        if (workload->req) {
                struct drm_i915_private *dev_priv =
                        workload->vgpu->gvt->dev_priv;
                struct intel_engine_cs *engine =
                        dev_priv->engine[workload->ring_id];
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (!workload->status && !vgpu->resetting) {
                        update_guest_context(workload);

                        for_each_set_bit(event, workload->pending_events,
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
                engine->context_unpin(engine, workload->vgpu->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        list_del_init(&workload->list);
        workload->complete(workload);

        atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);

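/*
 * One kernel thread per engine: sleep until the scheduler exposes a
 * workload for this ring, dispatch it, then wait for the request to
 * finish before retiring it. Runtime PM is held across each workload,
 * plus forcewake on the platforms that need it.
 */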
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                mutex_lock(&scheduler_mutex);

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        vgpu = workload->vgpu;
                        gvt_vgpu_err("fail to dispatch workload, skip\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);
                i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);

                mutex_unlock(&scheduler_mutex);
        }
        return 0;
}


void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        int i;

        gvt_dbg_core("clean workload scheduler\n");

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (scheduler->thread[i]) {
                        kthread_stop(scheduler->thread[i]);
                        scheduler->thread[i] = NULL;
                }
        }
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        int ret;
        int i;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                /* check ring mask at init time */
                if (!HAS_ENGINE(gvt->dev_priv, i))
                        continue;

                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
                        &vgpu->shadow_ctx_notifier_block);

        i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

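/*
 * Each vGPU owns a single i915 GEM context that serves as the shadow for
 * all of its guest contexts; creating it here also hooks up the context
 * status notifier used by the scheduler above.
 */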
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        vgpu->shadow_ctx_notifier_block.notifier_call =
                shadow_context_status_change;

        atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
                        &vgpu->shadow_ctx_notifier_block);
        return 0;
}