/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* set up the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
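
/*
 * For context, an illustrative sketch (not code from this file) of how a
 * paravirtualized guest consumes the PVINFO page populated above: the
 * guest reads the magic and version fields back through its MMIO BAR to
 * detect that it is running on GVT-g, roughly:
 *
 *	u64 magic = readq(mmio_base + vgtif_reg(magic).reg);
 *
 *	if (magic == VGT_MAGIC)
 *		// trust the ballooned avail_rs ranges written above
 *
 * The real guest-side helpers live in i915_vgpu.c; the read above is an
 * assumption-level sketch, not the verbatim guest code.
 */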

static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
};

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;

	/* vGPU type name is defined as GVTg_Vx_y, which encodes the
	 * physical GPU generation type (e.g. V4 for a BDW server, V5
	 * for a SKL server).
	 *
	 * Depending on the physical SKU resources, one might see vGPU
	 * types like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different
	 * types of vGPU can be created on the same physical GPU,
	 * depending on the available resources. Each vGPU type carries
	 * an "avail_instance" count indicating how many vGPU instances
	 * of this type can still be created (a worked example with
	 * concrete numbers follows this function).
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = ARRAY_SIZE(vgpu_types);

	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);

		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
						vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
						vgpu_types[i].name);

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
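
/*
 * Worked example with hypothetical numbers: if low_avail is 512MB and
 * high_avail is 2048MB, the "2" row of the table above (low 256MB, high
 * 1024MB) gets avail_instance = min(512/256, 2048/1024) = 2, i.e. at
 * most two vGPUs of type GVTg_Vx_2 can be created on this physical GPU.
 */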

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}

static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;

	/* This should eventually depend on the maximum hw resource
	 * size, but keep the static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;

	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);

		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
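
/*
 * Hypothetical update example: after some vGPUs have been allocated,
 * suppose a type is left with low_gm_min = 4, high_gm_min = 3 and
 * fence_min = 2; min(min(4, 3), 2) makes fence registers the bottleneck
 * and avail_instance drops to 2 for that type.
 */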

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * All virtual GPU runtime information will be destroyed.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&gvt->lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	idr_remove(&gvt->vgpu_idr, vgpu->id);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_gvt_context(vgpu);
	intel_vgpu_clean_execlist(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}

static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu on success, error pointer on failure.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return vgpu;

	/* recalculate the remaining available instances for each type */
	intel_gvt_update_vgpu_types(gvt);

	return vgpu;
}
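
/*
 * Typical lifecycle sketch (hypothetical caller; error handling mostly
 * elided and the type index is arbitrary):
 *
 *	struct intel_vgpu *vgpu;
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &gvt->types[0]);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *	intel_gvt_activate_vgpu(vgpu);
 *	// ...guest runs...
 *	intel_gvt_deactivate_vgpu(vgpu);
 *	intel_gvt_destroy_vgpu(vgpu);
 */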

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller should hold the gvt lock.
 *
 * A vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset,
 * returning the whole vGPU to the default state it had when created. This
 * vGPU function is required for both functionality and security concerns.
 * The ultimate goal of the vGPU FLR is to allow a vGPU instance to be
 * reused by virtual machines: before we assign a vGPU to a virtual
 * machine, we must issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement), as defined by the GPU spec.
 * Unlike the FLR, a GT reset only resets the particular resources of a vGPU
 * named in the reset request. A guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter @engine_mask specifies the engines that need to be reset.
 * If the value ALL_ENGINES is given for @engine_mask, the caller requests
 * a full GT reset and we will reset all virtual GPU engines. For FLR,
 * @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
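
/*
 * Usage sketch (hypothetical caller, gvt->lock held): a guest write to
 * the virtual GDRST register asking for a render engine reset would
 * funnel into a per-engine GT reset roughly like:
 *
 *	intel_gvt_reset_vgpu_locked(vgpu, false, (1 << RCS));
 *
 * whereas a device model level reset passes dmlr = true and ignores the
 * engine mask, as intel_gvt_reset_vgpu() below does.
 */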

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}