/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

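/*
 * Load-failure injection: when the i915.inject_load_failure module parameter
 * is set, the matching checkpoint along the probe path reports a failure so
 * that the driver's error-unwind paths can be exercised.
 */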
bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)


static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

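/*
 * Detect the PCH (south bridge) by scanning the ISA bridge devices on the
 * PCI bus and cache the result in dev_priv->pch_type / dev_priv->pch_id.
 */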
static void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easier for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN) there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev_priv));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

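/*
 * Handler for the GETPARAM ioctl: report driver and hardware capabilities
 * (engine presence, feature flags, sseu topology, ...) back to userspace.
 */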
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_GEN(dev_priv) >= 4;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

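/* Look up the host bridge (bus 0, device 0, function 0) used for MCHBAR access. */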
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; flag in mchbar_need_disable if we should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

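/*
 * vga_switcheroo callbacks: power the device up or down when the mux is
 * switched between the integrated and the discrete GPU.
 */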
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

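/* Tear down the GEM engines and contexts set up by i915_gem_init(). */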
static void i915_gem_fini(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

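/*
 * Bring up everything needed for modesetting: VGA arbitration, switcheroo,
 * power domains, interrupts, GMBUS, GEM and finally the fbdev emulation.
 */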
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

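/*
 * Remove any conflicting firmware framebuffer driver that has claimed the
 * GGTT mappable aperture before i915 takes over scanout.
 */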
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	if (IS_HSW_EARLY_SDV(dev_priv) ||
	    IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_gvt_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(&dev_priv->drm);

	intel_pm_setup(&dev_priv->drm);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	i915_gem_load_init(&dev_priv->drm);

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(&dev_priv->drm);
	i915_workqueues_cleanup(dev_priv);
}

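/* Map the MMIO register BAR and make sure MCHBAR is usable before register access. */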
static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

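/*
 * Clamp user-supplied module parameters (execlists, ppgtt, semaphores) to
 * what the hardware and driver combination actually supports.
 */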
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

1052 /* On the 945G/GM, the chipset reports the MSI capability on the
1053 * integrated graphics even though the support isn't actually there
1054 * according to the published specs. It doesn't appear to function
1055 * correctly in testing on 945G.
1056 * This may be a side effect of MSI having been made available for PEG
1057 * and the registers being closely associated.
1058 *
1059 * According to chipset errata, on the 965GM, MSI interrupts may
1060 * be lost or delayed, but we use them anyways to avoid
1061 * stuck interrupts on some machines.
1062 */
	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	/* Everything is in place, we can now relax! */
	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver.name, driver.major, driver.minor, driver.patchlevel,
		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
}

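/* Per-client open hook: set up the GEM file state for a new DRM client. */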
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

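/* Invoke the ->suspend() hook of every encoder that provides one, under the modeset locks. */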
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

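/*
 * First phase of system suspend: idle the GPU, shut the displays down and
 * save the state needed to bring the device back up on resume.
 */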
Imre Deak5e365c32014-10-23 19:23:25 +03001384static int i915_drm_suspend(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001385{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001386 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001387 struct pci_dev *pdev = dev_priv->drm.pdev;
Jesse Barnese5747e32014-06-12 08:35:47 -07001388 pci_power_t opregion_target_state;
Daniel Vetterd5818932015-02-23 12:03:26 +01001389 int error;
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001390
Zhang Ruib8efb172013-02-05 15:41:53 +08001391 /* ignore lid events during suspend */
1392 mutex_lock(&dev_priv->modeset_restore_lock);
1393 dev_priv->modeset_restore = MODESET_SUSPENDED;
1394 mutex_unlock(&dev_priv->modeset_restore_lock);
1395
Imre Deak1f814da2015-12-16 02:52:19 +02001396 disable_rpm_wakeref_asserts(dev_priv);
1397
Paulo Zanonic67a4702013-08-19 13:18:09 -03001398 /* We do a lot of poking in a lot of registers, make sure they work
1399 * properly. */
Imre Deakda7e29b2014-02-18 00:02:02 +02001400 intel_display_set_init_power(dev_priv, true);
Paulo Zanonicb107992013-01-25 16:59:15 -02001401
Dave Airlie5bcf7192010-12-07 09:20:40 +10001402 drm_kms_helper_poll_disable(dev);
1403
David Weinehall52a05c32016-08-22 13:32:44 +03001404 pci_save_state(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001405
Daniel Vetterd5818932015-02-23 12:03:26 +01001406 error = i915_gem_suspend(dev);
1407 if (error) {
David Weinehall52a05c32016-08-22 13:32:44 +03001408 dev_err(&pdev->dev,
Daniel Vetterd5818932015-02-23 12:03:26 +01001409 "GEM idle failed, resume might fail\n");
Imre Deak1f814da2015-12-16 02:52:19 +02001410 goto out;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001411 }
1412
Alex Daia1c41992015-09-30 09:46:37 -07001413 intel_guc_suspend(dev);
1414
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02001415 intel_display_suspend(dev);
Daniel Vetterd5818932015-02-23 12:03:26 +01001416
1417 intel_dp_mst_suspend(dev);
1418
1419 intel_runtime_pm_disable_interrupts(dev_priv);
1420 intel_hpd_cancel_work(dev_priv);
1421
1422 intel_suspend_encoders(dev_priv);
1423
1424 intel_suspend_hw(dev);
1425
Ben Widawsky828c7902013-10-16 09:21:30 -07001426 i915_gem_suspend_gtt_mappings(dev);
1427
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001428 i915_save_state(dev);
1429
Imre Deakbc872292015-11-18 17:32:30 +02001430 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001431 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
Jesse Barnese5747e32014-06-12 08:35:47 -07001432
Chris Wilsondc979972016-05-10 14:10:04 +01001433 intel_uncore_forcewake_reset(dev_priv, false);
Chris Wilson03d92e42016-05-23 15:08:10 +01001434 intel_opregion_unregister(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001435
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001436 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
Dave Airlie3fa016a2012-03-28 10:48:49 +01001437
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001438 dev_priv->suspend_count++;
1439
Kristen Carlson Accardi85e90672014-06-12 08:35:44 -07001440 intel_display_set_init_power(dev_priv, false);
1441
Imre Deakf74ed082016-04-18 14:48:21 +03001442 intel_csr_ucode_suspend(dev_priv);
Imre Deakf514c2d2015-10-28 23:59:06 +02001443
Imre Deak1f814da2015-12-16 02:52:19 +02001444out:
1445 enable_rpm_wakeref_asserts(dev_priv);
1446
1447 return error;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001448}
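/*
 * A minimal sketch of the target-state choice made in i915_drm_suspend()
 * above: when the ACPI target is shallower than S3 (suspend-to-idle) the
 * opregion firmware is told D1, otherwise D3cold.  Standalone illustration
 * only; the sketch_* names are stand-ins for the ACPI_STATE_* and PCI_D*
 * constants, not driver symbols.
 */
enum sketch_acpi_state { SKETCH_S0 = 0, SKETCH_S1 = 1, SKETCH_S3 = 3 };
enum sketch_pci_state  { SKETCH_D1 = 1, SKETCH_D3COLD = 4 };

static int sketch_suspend_to_idle(enum sketch_acpi_state target)
{
	return target < SKETCH_S3;	/* anything shallower than S3 is s2idle */
}

static enum sketch_pci_state
sketch_opregion_target(enum sketch_acpi_state target)
{
	return sketch_suspend_to_idle(target) ? SKETCH_D1 : SKETCH_D3COLD;
}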
1449
David Weinehallc49d13e2016-08-22 13:32:42 +03001450static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
Imre Deakc3c09c92014-10-23 19:23:15 +03001451{
David Weinehallc49d13e2016-08-22 13:32:42 +03001452 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001453 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deakbc872292015-11-18 17:32:30 +02001454 bool fw_csr;
Imre Deakc3c09c92014-10-23 19:23:15 +03001455 int ret;
1456
Imre Deak1f814da2015-12-16 02:52:19 +02001457 disable_rpm_wakeref_asserts(dev_priv);
1458
Imre Deaka7c81252016-04-01 16:02:38 +03001459 fw_csr = !IS_BROXTON(dev_priv) &&
1460 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
Imre Deakbc872292015-11-18 17:32:30 +02001461 /*
1462 * In case of firmware assisted context save/restore don't manually
1463 * deinit the power domains. This also means the CSR/DMC firmware will
1464 * stay active, it will power down any HW resources as required and
1465 * also enable deeper system power states that would be blocked if the
1466 * firmware was inactive.
1467 */
1468 if (!fw_csr)
1469 intel_power_domains_suspend(dev_priv);
Imre Deak73dfc222015-11-17 17:33:53 +02001470
Imre Deak507e1262016-04-20 20:27:54 +03001471 ret = 0;
Imre Deakb8aea3d12016-04-20 20:27:55 +03001472 if (IS_BROXTON(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001473 bxt_enable_dc9(dev_priv);
Imre Deakb8aea3d12016-04-20 20:27:55 +03001474 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001475 hsw_enable_pc8(dev_priv);
1476 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1477 ret = vlv_suspend_complete(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03001478
1479 if (ret) {
1480 DRM_ERROR("Suspend complete failed: %d\n", ret);
Imre Deakbc872292015-11-18 17:32:30 +02001481 if (!fw_csr)
1482 intel_power_domains_init_hw(dev_priv, true);
Imre Deakc3c09c92014-10-23 19:23:15 +03001483
Imre Deak1f814da2015-12-16 02:52:19 +02001484 goto out;
Imre Deakc3c09c92014-10-23 19:23:15 +03001485 }
1486
David Weinehall52a05c32016-08-22 13:32:44 +03001487 pci_disable_device(pdev);
Imre Deakab3be732015-03-02 13:04:41 +02001488 /*
Imre Deak54875572015-06-30 17:06:47 +03001489 * During hibernation on some platforms the BIOS may try to access
Imre Deakab3be732015-03-02 13:04:41 +02001490 * the device even though it's already in D3 and hang the machine. So
1491 * leave the device in D0 on those platforms and hope the BIOS will
Imre Deak54875572015-06-30 17:06:47 +03001492 * power down the device properly. The issue was seen on multiple old
1493 * GENs with different BIOS vendors, so having an explicit blacklist
1494 * is impractical; apply the workaround on everything pre GEN6. The
1495 * platforms where the issue was seen:
1496 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1497 * Fujitsu FSC S7110
1498 * Acer Aspire 1830T
Imre Deakab3be732015-03-02 13:04:41 +02001499 */
Imre Deak54875572015-06-30 17:06:47 +03001500 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
David Weinehall52a05c32016-08-22 13:32:44 +03001501 pci_set_power_state(pdev, PCI_D3hot);
Imre Deakc3c09c92014-10-23 19:23:15 +03001502
Imre Deakbc872292015-11-18 17:32:30 +02001503 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
1504
Imre Deak1f814da2015-12-16 02:52:19 +02001505out:
1506 enable_rpm_wakeref_asserts(dev_priv);
1507
1508 return ret;
Imre Deakc3c09c92014-10-23 19:23:15 +03001509}
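/*
 * Two of the decisions in i915_drm_suspend_late() condensed into pure
 * predicates, as a hedged sketch rather than driver code.  The arguments
 * are plain ints standing in for IS_BROXTON(), suspend_to_idle(), the
 * loaded DMC payload and INTEL_INFO()->gen.
 */
static int sketch_firmware_handles_ctx(int is_broxton, int s2idle,
				       int dmc_loaded)
{
	/* CSR/DMC firmware covers context save/restore only for s2idle with
	 * firmware loaded, and not on Broxton; otherwise the power domains
	 * are suspended by hand. */
	return !is_broxton && s2idle && dmc_loaded;
}

static int sketch_should_enter_d3hot(int hibernation, int gen)
{
	/* Stay in D0 for hibernation on pre-gen6 parts so a quirky BIOS
	 * cannot hang poking a powered-down device. */
	return !(hibernation && gen < 6);
}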
1510
Maarten Lankhorst1751fcf2015-08-27 15:15:15 +02001511int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001512{
1513 int error;
1514
Chris Wilsonded8b072016-07-05 10:40:22 +01001515 if (!dev) {
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001516 DRM_ERROR("dev: %p\n", dev);
Keith Packard1ae8c0a2009-06-28 15:42:17 -07001517 DRM_ERROR("DRM not initialized, aborting suspend.\n");
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001518 return -ENODEV;
1519 }
1520
Imre Deak0b14cbd2014-09-10 18:16:55 +03001521 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1522 state.event != PM_EVENT_FREEZE))
1523 return -EINVAL;
Dave Airlie5bcf7192010-12-07 09:20:40 +10001524
1525 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1526 return 0;
Chris Wilson6eecba32010-09-08 09:45:11 +01001527
Imre Deak5e365c32014-10-23 19:23:25 +03001528 error = i915_drm_suspend(dev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001529 if (error)
1530 return error;
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001531
Imre Deakab3be732015-03-02 13:04:41 +02001532 return i915_drm_suspend_late(dev, false);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001533}
1534
Imre Deak5e365c32014-10-23 19:23:25 +03001535static int i915_drm_resume(struct drm_device *dev)
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001536{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001537 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001538 int ret;
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01001539
Imre Deak1f814da2015-12-16 02:52:19 +02001540 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonabc80ab2016-08-24 10:27:01 +01001541 intel_sanitize_gt_powersave(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02001542
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001543 ret = i915_ggtt_enable_hw(dev_priv);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001544 if (ret)
1545 DRM_ERROR("failed to re-enable GGTT\n");
1546
Imre Deakf74ed082016-04-18 14:48:21 +03001547 intel_csr_ucode_resume(dev_priv);
1548
Chris Wilson5ab57c72016-07-15 14:56:20 +01001549 i915_gem_resume(dev);
Paulo Zanoni9d49c0e2013-09-12 18:06:43 -03001550
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001551 i915_restore_state(dev);
Imre Deak8090ba82016-08-10 14:07:33 +03001552 intel_pps_unlock_regs_wa(dev_priv);
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001553 intel_opregion_setup(dev_priv);
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001554
Daniel Vetterd5818932015-02-23 12:03:26 +01001555 intel_init_pch_refclk(dev);
1556 drm_mode_config_reset(dev);
Chris Wilson1833b132012-05-09 11:56:28 +01001557
Peter Antoine364aece2015-05-11 08:50:45 +01001558 /*
1559 * Interrupts have to be enabled before any batches are run. If not, the
1560 * GPU will hang. i915_gem_init_hw() will initiate batches to
1561 * update/restore the context.
1562 *
1563 * Modeset enabling in intel_modeset_init_hw() also needs working
1564 * interrupts.
1565 */
1566 intel_runtime_pm_enable_interrupts(dev_priv);
1567
Daniel Vetterd5818932015-02-23 12:03:26 +01001568 mutex_lock(&dev->struct_mutex);
1569 if (i915_gem_init_hw(dev)) {
1570 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
Chris Wilson821ed7d2016-09-09 14:11:53 +01001571 i915_gem_set_wedged(dev_priv);
Jesse Barnesd5bb0812011-01-05 12:01:26 -08001572 }
Daniel Vetterd5818932015-02-23 12:03:26 +01001573 mutex_unlock(&dev->struct_mutex);
1574
Alex Daia1c41992015-09-30 09:46:37 -07001575 intel_guc_resume(dev);
1576
Daniel Vetterd5818932015-02-23 12:03:26 +01001577 intel_modeset_init_hw(dev);
1578
1579 spin_lock_irq(&dev_priv->irq_lock);
1580 if (dev_priv->display.hpd_irq_setup)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001581 dev_priv->display.hpd_irq_setup(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001582 spin_unlock_irq(&dev_priv->irq_lock);
1583
Daniel Vetterd5818932015-02-23 12:03:26 +01001584 intel_dp_mst_resume(dev);
1585
Lyudea16b7652016-03-11 10:57:01 -05001586 intel_display_resume(dev);
1587
Daniel Vetterd5818932015-02-23 12:03:26 +01001588 /*
1589 * ... but also need to make sure that hotplug processing
1590 * doesn't cause havoc. Like in the driver load code we don't
1591 * bother with the tiny race here where we might lose hotplug
1592 * notifications.
1593 * */
1594 intel_hpd_init(dev_priv);
1595 /* Config may have changed between suspend and resume */
1596 drm_helper_hpd_irq_event(dev);
Jesse Barnes1daed3f2011-01-05 12:01:25 -08001597
Chris Wilson03d92e42016-05-23 15:08:10 +01001598 intel_opregion_register(dev_priv);
Chris Wilson44834a62010-08-19 16:09:23 +01001599
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001600 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
Jesse Barnes073f34d2012-11-02 11:13:59 -07001601
Zhang Ruib8efb172013-02-05 15:41:53 +08001602 mutex_lock(&dev_priv->modeset_restore_lock);
1603 dev_priv->modeset_restore = MODESET_DONE;
1604 mutex_unlock(&dev_priv->modeset_restore_lock);
Paulo Zanoni8a187452013-12-06 20:32:13 -02001605
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001606 intel_opregion_notify_adapter(dev_priv, PCI_D0);
Jesse Barnese5747e32014-06-12 08:35:47 -07001607
Chris Wilson54b4f682016-07-21 21:16:19 +01001608 intel_autoenable_gt_powersave(dev_priv);
Imre Deakee6f2802014-10-23 19:23:22 +03001609 drm_kms_helper_poll_enable(dev);
1610
Imre Deak1f814da2015-12-16 02:52:19 +02001611 enable_rpm_wakeref_asserts(dev_priv);
1612
Chris Wilson074c6ad2014-04-09 09:19:43 +01001613 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001614}
1615
Imre Deak5e365c32014-10-23 19:23:25 +03001616static int i915_drm_resume_early(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001617{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001618 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001619 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deak44410cd2016-04-18 14:45:54 +03001620 int ret;
Imre Deak36d61e62014-10-23 19:23:24 +03001621
Imre Deak76c4b252014-04-01 19:55:22 +03001622 /*
1623 * We have a resume ordering issue with the snd-hda driver also
1624 * requiring our device to be powered up. Due to the lack of a
1625 * parent/child relationship we currently solve this with an early
1626 * resume hook.
1627 *
1628 * FIXME: This should be solved with a special hdmi sink device or
1629 * similar so that power domains can be employed.
1630 */
Imre Deak44410cd2016-04-18 14:45:54 +03001631
1632 /*
1633 * Note that we need to set the power state explicitly, since we
1634 * powered off the device during freeze and the PCI core won't power
1635 * it back up for us during thaw. Powering off the device during
1636 * freeze is not a hard requirement though, and during the
1637 * suspend/resume phases the PCI core makes sure we get here with the
1638 * device powered on. So in case we change our freeze logic and keep
1639 * the device powered we can also remove the following set power state
1640 * call.
1641 */
David Weinehall52a05c32016-08-22 13:32:44 +03001642 ret = pci_set_power_state(pdev, PCI_D0);
Imre Deak44410cd2016-04-18 14:45:54 +03001643 if (ret) {
1644 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
1645 goto out;
1646 }
1647
1648 /*
1649 * Note that pci_enable_device() first enables any parent bridge
1650 * device and only then sets the power state for this device. The
1651 * bridge enabling is a nop though, since bridge devices are resumed
1652 * first. The order of enabling power and enabling the device is
1653 * imposed by the PCI core as described above, so here we preserve the
1654 * same order for the freeze/thaw phases.
1655 *
1656 * TODO: eventually we should remove pci_disable_device() /
1657 * pci_enable_device() from suspend/resume. Due to how they
1658 * depend on the device enable refcount we can't anyway depend on them
1659 * disabling/enabling the device.
1660 */
David Weinehall52a05c32016-08-22 13:32:44 +03001661 if (pci_enable_device(pdev)) {
Imre Deakbc872292015-11-18 17:32:30 +02001662 ret = -EIO;
1663 goto out;
1664 }
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001665
David Weinehall52a05c32016-08-22 13:32:44 +03001666 pci_set_master(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001667
Imre Deak1f814da2015-12-16 02:52:19 +02001668 disable_rpm_wakeref_asserts(dev_priv);
1669
Wayne Boyer666a4532015-12-09 12:29:35 -08001670 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001671 ret = vlv_resume_prepare(dev_priv, false);
Imre Deak36d61e62014-10-23 19:23:24 +03001672 if (ret)
Damien Lespiauff0b1872015-05-20 14:45:15 +01001673 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
1674 ret);
Imre Deak36d61e62014-10-23 19:23:24 +03001675
Chris Wilsondc979972016-05-10 14:10:04 +01001676 intel_uncore_early_sanitize(dev_priv, true);
Paulo Zanoniefee8332014-10-27 17:54:33 -02001677
Chris Wilsondc979972016-05-10 14:10:04 +01001678 if (IS_BROXTON(dev_priv)) {
Imre Deakda2f41d2016-04-20 20:27:56 +03001679 if (!dev_priv->suspended_to_idle)
1680 gen9_sanitize_dc_state(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03001681 bxt_disable_dc9(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001682 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Damien Lespiaua9a6b732015-05-20 14:45:14 +01001683 hsw_disable_pc8(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001684 }
Paulo Zanoniefee8332014-10-27 17:54:33 -02001685
Chris Wilsondc979972016-05-10 14:10:04 +01001686 intel_uncore_sanitize(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02001687
Imre Deaka7c81252016-04-01 16:02:38 +03001688 if (IS_BROXTON(dev_priv) ||
1689 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
Imre Deakbc872292015-11-18 17:32:30 +02001690 intel_power_domains_init_hw(dev_priv, true);
1691
Imre Deak6e35e8a2016-04-18 10:04:19 +03001692 enable_rpm_wakeref_asserts(dev_priv);
1693
Imre Deakbc872292015-11-18 17:32:30 +02001694out:
1695 dev_priv->suspended_to_idle = false;
Imre Deak36d61e62014-10-23 19:23:24 +03001696
1697 return ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001698}
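/*
 * The early-resume path above is essentially a fixed sequence where the
 * PCI steps are hard failures and the platform hook is only logged
 * ("continuing anyway").  A hedged sketch of that structure with
 * hypothetical step callbacks:
 */
typedef int (*sketch_step_fn)(void);

struct sketch_resume_step {
	sketch_step_fn	fn;
	int		hard;	/* non-zero: abort the sequence on failure */
};

static int sketch_run_steps(const struct sketch_resume_step *steps, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = steps[i].fn();
		if (ret && steps[i].hard)
			return ret;	/* bail out, caller unwinds/logs */
		/* soft failure: carry on, the caller only logs it */
	}

	return 0;
}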
1699
Maarten Lankhorst1751fcf2015-08-27 15:15:15 +02001700int i915_resume_switcheroo(struct drm_device *dev)
Imre Deak76c4b252014-04-01 19:55:22 +03001701{
Imre Deak50a00722014-10-23 19:23:17 +03001702 int ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001703
Imre Deak097dd832014-10-23 19:23:19 +03001704 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1705 return 0;
1706
Imre Deak5e365c32014-10-23 19:23:25 +03001707 ret = i915_drm_resume_early(dev);
Imre Deak50a00722014-10-23 19:23:17 +03001708 if (ret)
1709 return ret;
1710
Imre Deak5a175142014-10-23 19:23:18 +03001711 return i915_drm_resume(dev);
1712}
1713
Chris Wilson9e60ab02016-10-04 21:11:28 +01001714static void disable_engines_irq(struct drm_i915_private *dev_priv)
1715{
1716 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05301717 enum intel_engine_id id;
Chris Wilson9e60ab02016-10-04 21:11:28 +01001718
1719 /* Ensure irq handler finishes, and not run again. */
1720 disable_irq(dev_priv->drm.irq);
Akash Goel3b3f1652016-10-13 22:44:48 +05301721 for_each_engine(engine, dev_priv, id)
Chris Wilson9e60ab02016-10-04 21:11:28 +01001722 tasklet_kill(&engine->irq_tasklet);
1723}
1724
1725static void enable_engines_irq(struct drm_i915_private *dev_priv)
1726{
1727 enable_irq(dev_priv->drm.irq);
1728}
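/*
 * Sketch of the quiesce pattern used by disable_engines_irq(): stop the
 * interrupt source first so no new bottom halves are scheduled, then flush
 * each engine's deferred work so nothing touches the hardware mid-reset.
 * The callbacks are hypothetical stand-ins for disable_irq() and
 * tasklet_kill(); they are not driver symbols.
 */
struct sketch_irq_ops {
	void (*mask_source)(void *ctx);		/* stop new interrupts */
	void (*flush_deferred)(void *engine);	/* wait out pending work */
};

static void sketch_quiesce_engines(const struct sketch_irq_ops *ops,
				   void *ctx, void **engines, int count)
{
	int i;

	ops->mask_source(ctx);			/* order matters: mask first */
	for (i = 0; i < count; i++)
		ops->flush_deferred(engines[i]);
}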
1729
Ben Gamari11ed50e2009-09-14 17:48:45 -04001730/**
Eugeni Dodonovf3953dc2011-11-28 16:15:17 -02001731 * i915_reset - reset chip after a hang
Ben Gamari11ed50e2009-09-14 17:48:45 -04001732 * @dev_priv: device private
Ben Gamari11ed50e2009-09-14 17:48:45 -04001733 *
Chris Wilson780f2622016-09-09 14:11:52 +01001734 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1735 * on failure.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001736 *
Chris Wilson221fe792016-09-09 14:11:51 +01001737 * Caller must hold the struct_mutex.
1738 *
Ben Gamari11ed50e2009-09-14 17:48:45 -04001739 * Procedure is fairly simple:
1740 * - reset the chip using the reset reg
1741 * - re-init context state
1742 * - re-init hardware status page
1743 * - re-init ring buffer
1744 * - re-init interrupt state
1745 * - re-init display
1746 */
Chris Wilson780f2622016-09-09 14:11:52 +01001747void i915_reset(struct drm_i915_private *dev_priv)
Ben Gamari11ed50e2009-09-14 17:48:45 -04001748{
Chris Wilson91c8a322016-07-05 10:40:23 +01001749 struct drm_device *dev = &dev_priv->drm;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001750 struct i915_gpu_error *error = &dev_priv->gpu_error;
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001751 int ret;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001752
Chris Wilson221fe792016-09-09 14:11:51 +01001753 lockdep_assert_held(&dev->struct_mutex);
1754
1755 if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
Chris Wilson780f2622016-09-09 14:11:52 +01001756 return;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001757
Chris Wilsond98c52c2016-04-13 17:35:05 +01001758 /* Clear any previous failed attempts at recovery. Time to try again. */
Chris Wilson8af29b02016-09-09 14:11:47 +01001759 __clear_bit(I915_WEDGED, &error->flags);
1760 error->reset_count++;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001761
Chris Wilson7b4d3a12016-07-04 08:08:37 +01001762 pr_notice("drm/i915: Resetting chip after gpu hang\n");
Chris Wilson9e60ab02016-10-04 21:11:28 +01001763
1764 disable_engines_irq(dev_priv);
Chris Wilsondc979972016-05-10 14:10:04 +01001765 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
Chris Wilson9e60ab02016-10-04 21:11:28 +01001766 enable_engines_irq(dev_priv);
1767
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001768 if (ret) {
Chris Wilson804e59a2016-04-13 17:35:09 +01001769 if (ret != -ENODEV)
1770 DRM_ERROR("Failed to reset chip: %i\n", ret);
1771 else
1772 DRM_DEBUG_DRIVER("GPU reset disabled\n");
Chris Wilsond98c52c2016-04-13 17:35:05 +01001773 goto error;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001774 }
1775
Chris Wilson821ed7d2016-09-09 14:11:53 +01001776 i915_gem_reset(dev_priv);
Ville Syrjälä1362b772014-11-26 17:07:29 +02001777 intel_overlay_reset(dev_priv);
1778
Ben Gamari11ed50e2009-09-14 17:48:45 -04001779 /* Ok, now get things going again... */
1780
1781 /*
1782 * Everything depends on having the GTT running, so we need to start
1783 * there. Fortunately we don't need to do this unless we reset the
1784 * chip at a PCI level.
1785 *
1786 * Next we need to restore the context, but we don't use those
1787 * yet either...
1788 *
1789 * Ring buffer needs to be re-initialized in the KMS case, or if X
1790 * was running at the time of the reset (i.e. we weren't VT
1791 * switched away).
1792 */
Daniel Vetter33d30a92015-02-23 12:03:27 +01001793 ret = i915_gem_init_hw(dev);
Daniel Vetter33d30a92015-02-23 12:03:27 +01001794 if (ret) {
1795 DRM_ERROR("Failed hw init on reset %d\n", ret);
Chris Wilsond98c52c2016-04-13 17:35:05 +01001796 goto error;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001797 }
1798
Chris Wilson780f2622016-09-09 14:11:52 +01001799wakeup:
1800 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
1801 return;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001802
1803error:
Chris Wilson821ed7d2016-09-09 14:11:53 +01001804 i915_gem_set_wedged(dev_priv);
Chris Wilson780f2622016-09-09 14:11:52 +01001805 goto wakeup;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001806}
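/*
 * Compact model of the i915_reset() control flow: clear the wedged flag,
 * attempt the reset and the hardware re-init, and on any failure mark the
 * device wedged before waking waiters either way.  All names here are
 * illustrative; only the shape of the logic mirrors the function above.
 */
struct sketch_gpu_error {
	unsigned int	wedged : 1;
	unsigned int	reset_count;
};

static int sketch_try_reset(struct sketch_gpu_error *error,
			    int (*hw_reset)(void), int (*hw_init)(void))
{
	int ret;

	error->wedged = 0;		/* clear previous failed attempts */
	error->reset_count++;

	ret = hw_reset();
	if (!ret)
		ret = hw_init();
	if (ret)
		error->wedged = 1;	/* mirrors i915_gem_set_wedged() */

	/* either way, waiters on the reset flag would be woken here */
	return ret;
}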
1807
David Weinehallc49d13e2016-08-22 13:32:42 +03001808static int i915_pm_suspend(struct device *kdev)
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001809{
David Weinehallc49d13e2016-08-22 13:32:42 +03001810 struct pci_dev *pdev = to_pci_dev(kdev);
1811 struct drm_device *dev = pci_get_drvdata(pdev);
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001812
David Weinehallc49d13e2016-08-22 13:32:42 +03001813 if (!dev) {
1814 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001815 return -ENODEV;
1816 }
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001817
David Weinehallc49d13e2016-08-22 13:32:42 +03001818 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Dave Airlie5bcf7192010-12-07 09:20:40 +10001819 return 0;
1820
David Weinehallc49d13e2016-08-22 13:32:42 +03001821 return i915_drm_suspend(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03001822}
1823
David Weinehallc49d13e2016-08-22 13:32:42 +03001824static int i915_pm_suspend_late(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03001825{
David Weinehallc49d13e2016-08-22 13:32:42 +03001826 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03001827
1828 /*
Damien Lespiauc965d9952015-05-18 19:53:48 +01001829 * We have a suspend ordering issue with the snd-hda driver also
Imre Deak76c4b252014-04-01 19:55:22 +03001830 * requiring our device to be powered up. Due to the lack of a
1831 * parent/child relationship we currently solve this with a late
1832 * suspend hook.
1833 *
1834 * FIXME: This should be solved with a special hdmi sink device or
1835 * similar so that power domains can be employed.
1836 */
David Weinehallc49d13e2016-08-22 13:32:42 +03001837 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak76c4b252014-04-01 19:55:22 +03001838 return 0;
Kristian Høgsberg112b7152009-01-04 16:55:33 -05001839
David Weinehallc49d13e2016-08-22 13:32:42 +03001840 return i915_drm_suspend_late(dev, false);
Imre Deakab3be732015-03-02 13:04:41 +02001841}
1842
David Weinehallc49d13e2016-08-22 13:32:42 +03001843static int i915_pm_poweroff_late(struct device *kdev)
Imre Deakab3be732015-03-02 13:04:41 +02001844{
David Weinehallc49d13e2016-08-22 13:32:42 +03001845 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deakab3be732015-03-02 13:04:41 +02001846
David Weinehallc49d13e2016-08-22 13:32:42 +03001847 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deakab3be732015-03-02 13:04:41 +02001848 return 0;
1849
David Weinehallc49d13e2016-08-22 13:32:42 +03001850 return i915_drm_suspend_late(dev, true);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08001851}
1852
David Weinehallc49d13e2016-08-22 13:32:42 +03001853static int i915_pm_resume_early(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03001854{
David Weinehallc49d13e2016-08-22 13:32:42 +03001855 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03001856
David Weinehallc49d13e2016-08-22 13:32:42 +03001857 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03001858 return 0;
1859
David Weinehallc49d13e2016-08-22 13:32:42 +03001860 return i915_drm_resume_early(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03001861}
1862
David Weinehallc49d13e2016-08-22 13:32:42 +03001863static int i915_pm_resume(struct device *kdev)
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08001864{
David Weinehallc49d13e2016-08-22 13:32:42 +03001865 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001866
David Weinehallc49d13e2016-08-22 13:32:42 +03001867 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03001868 return 0;
1869
David Weinehallc49d13e2016-08-22 13:32:42 +03001870 return i915_drm_resume(dev);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08001871}
1872
Chris Wilson1f19ac22016-05-14 07:26:32 +01001873/* freeze: before creating the hibernation_image */
David Weinehallc49d13e2016-08-22 13:32:42 +03001874static int i915_pm_freeze(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001875{
Chris Wilson6a800ea2016-09-21 14:51:07 +01001876 int ret;
1877
1878 ret = i915_pm_suspend(kdev);
1879 if (ret)
1880 return ret;
1881
1882 ret = i915_gem_freeze(kdev_to_i915(kdev));
1883 if (ret)
1884 return ret;
1885
1886 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01001887}
1888
David Weinehallc49d13e2016-08-22 13:32:42 +03001889static int i915_pm_freeze_late(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001890{
Chris Wilson461fb992016-05-14 07:26:33 +01001891 int ret;
1892
David Weinehallc49d13e2016-08-22 13:32:42 +03001893 ret = i915_pm_suspend_late(kdev);
Chris Wilson461fb992016-05-14 07:26:33 +01001894 if (ret)
1895 return ret;
1896
David Weinehallc49d13e2016-08-22 13:32:42 +03001897 ret = i915_gem_freeze_late(kdev_to_i915(kdev));
Chris Wilson461fb992016-05-14 07:26:33 +01001898 if (ret)
1899 return ret;
1900
1901 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01001902}
1903
1904/* thaw: called after creating the hibernation image, but before turning off. */
David Weinehallc49d13e2016-08-22 13:32:42 +03001905static int i915_pm_thaw_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001906{
David Weinehallc49d13e2016-08-22 13:32:42 +03001907 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01001908}
1909
David Weinehallc49d13e2016-08-22 13:32:42 +03001910static int i915_pm_thaw(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001911{
David Weinehallc49d13e2016-08-22 13:32:42 +03001912 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01001913}
1914
1915/* restore: called after loading the hibernation image. */
David Weinehallc49d13e2016-08-22 13:32:42 +03001916static int i915_pm_restore_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001917{
David Weinehallc49d13e2016-08-22 13:32:42 +03001918 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01001919}
1920
David Weinehallc49d13e2016-08-22 13:32:42 +03001921static int i915_pm_restore(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01001922{
David Weinehallc49d13e2016-08-22 13:32:42 +03001923 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01001924}
1925
Imre Deakddeea5b2014-05-05 15:19:56 +03001926/*
1927 * Save all Gunit registers that may be lost after a D3 and a subsequent
1928 * S0i[R123] transition. The list of registers needing a save/restore is
1929 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
1930 * registers in the following way:
1931 * - Driver: saved/restored by the driver
1932 * - Punit : saved/restored by the Punit firmware
1933 * - No, w/o marking: no need to save/restore, since the register is R/O or
1934 * used internally by the HW in a way that doesn't depend
1935 * keeping the content across a suspend/resume.
1936 * - Debug : used for debugging
1937 *
1938 * We save/restore all registers marked with 'Driver', with the following
1939 * exceptions:
1940 * - Registers out of use, including also registers marked with 'Debug'.
1941 * These have no effect on the driver's operation, so we don't save/restore
1942 * them to reduce the overhead.
1943 * - Registers that are fully set up by an initialization function called from
1944 * the resume path. For example many clock gating and RPS/RC6 registers.
1945 * - Registers that provide the right functionality with their reset defaults.
1946 *
1947 * TODO: Except for registers that based on the above 3 criteria can be safely
1948 * ignored, we save/restore all others, practically treating the HW context as
1949 * a black-box for the driver. Further investigation is needed to reduce the
1950 * saved/restored registers even further, by following the same 3 criteria.
1951 */
1952static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1953{
1954 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1955 int i;
1956
1957 /* GAM 0x4000-0x4770 */
1958 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1959 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1960 s->arb_mode = I915_READ(ARB_MODE);
1961 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1962 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1963
1964 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03001965 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03001966
1967 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
Imre Deakb5f1c972015-04-15 16:52:30 -07001968 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
Imre Deakddeea5b2014-05-05 15:19:56 +03001969
1970 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1971 s->ecochk = I915_READ(GAM_ECOCHK);
1972 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1973 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1974
1975 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1976
1977 /* MBC 0x9024-0x91D0, 0x8500 */
1978 s->g3dctl = I915_READ(VLV_G3DCTL);
1979 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1980 s->mbctl = I915_READ(GEN6_MBCTL);
1981
1982 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1983 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1984 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1985 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1986 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1987 s->rstctl = I915_READ(GEN6_RSTCTL);
1988 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1989
1990 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1991 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1992 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1993 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1994 s->ecobus = I915_READ(ECOBUS);
1995 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1996 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1997 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1998 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1999 s->rcedata = I915_READ(VLV_RCEDATA);
2000 s->spare2gh = I915_READ(VLV_SPAREG2H);
2001
2002 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2003 s->gt_imr = I915_READ(GTIMR);
2004 s->gt_ier = I915_READ(GTIER);
2005 s->pm_imr = I915_READ(GEN6_PMIMR);
2006 s->pm_ier = I915_READ(GEN6_PMIER);
2007
2008 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002009 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03002010
2011 /* GT SA CZ domain, 0x100000-0x138124 */
2012 s->tilectl = I915_READ(TILECTL);
2013 s->gt_fifoctl = I915_READ(GTFIFOCTL);
2014 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
2015 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2016 s->pmwgicz = I915_READ(VLV_PMWGICZ);
2017
2018 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2019 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
2020 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002021 s->pcbr = I915_READ(VLV_PCBR);
Imre Deakddeea5b2014-05-05 15:19:56 +03002022 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2023
2024 /*
2025 * Not saving any of:
2026 * DFT, 0x9800-0x9EC0
2027 * SARB, 0xB000-0xB1FC
2028 * GAC, 0x5208-0x524C, 0x14000-0x14C000
2029 * PCI CFG
2030 */
2031}
2032
2033static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2034{
2035 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2036 u32 val;
2037 int i;
2038
2039 /* GAM 0x4000-0x4770 */
2040 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
2041 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
2042 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
2043 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
2044 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
2045
2046 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002047 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002048
2049 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
Imre Deakb5f1c972015-04-15 16:52:30 -07002050 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
Imre Deakddeea5b2014-05-05 15:19:56 +03002051
2052 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2053 I915_WRITE(GAM_ECOCHK, s->ecochk);
2054 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
2055 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
2056
2057 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
2058
2059 /* MBC 0x9024-0x91D0, 0x8500 */
2060 I915_WRITE(VLV_G3DCTL, s->g3dctl);
2061 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
2062 I915_WRITE(GEN6_MBCTL, s->mbctl);
2063
2064 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2065 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
2066 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
2067 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
2068 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
2069 I915_WRITE(GEN6_RSTCTL, s->rstctl);
2070 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
2071
2072 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2073 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
2074 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
2075 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
2076 I915_WRITE(ECOBUS, s->ecobus);
2077 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
2078 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2079 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
2080 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
2081 I915_WRITE(VLV_RCEDATA, s->rcedata);
2082 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
2083
2084 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2085 I915_WRITE(GTIMR, s->gt_imr);
2086 I915_WRITE(GTIER, s->gt_ier);
2087 I915_WRITE(GEN6_PMIMR, s->pm_imr);
2088 I915_WRITE(GEN6_PMIER, s->pm_ier);
2089
2090 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002091 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002092
2093 /* GT SA CZ domain, 0x100000-0x138124 */
2094 I915_WRITE(TILECTL, s->tilectl);
2095 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
2096 /*
2097 * Preserve the GT allow wake and GFX force clock bit, they are not
2098 * to be restored, as they are used to control the s0ix suspend/resume
2099 * sequence by the caller.
2100 */
2101 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2102 val &= VLV_GTLC_ALLOWWAKEREQ;
2103 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2104 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2105
2106 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2107 val &= VLV_GFX_CLK_FORCE_ON_BIT;
2108 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2109 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2110
2111 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
2112
2113 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2114 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
2115 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002116 I915_WRITE(VLV_PCBR, s->pcbr);
Imre Deakddeea5b2014-05-05 15:19:56 +03002117 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
2118}
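/*
 * The save/restore pair above is written out long-hand, one register per
 * statement.  A hedged sketch of the same idea in table form, useful when
 * the register list itself is the interesting part; read32/write32 are
 * hypothetical MMIO accessors, not the driver's I915_READ()/I915_WRITE().
 */
struct sketch_reg_slot {
	unsigned int	offset;		/* MMIO offset */
	unsigned int	value;		/* saved contents */
};

static void sketch_save_regs(struct sketch_reg_slot *tbl, int n,
			     unsigned int (*read32)(unsigned int))
{
	int i;

	for (i = 0; i < n; i++)
		tbl[i].value = read32(tbl[i].offset);
}

static void sketch_restore_regs(const struct sketch_reg_slot *tbl, int n,
				void (*write32)(unsigned int, unsigned int))
{
	int i;

	for (i = 0; i < n; i++)
		write32(tbl[i].offset, tbl[i].value);
}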
2119
Imre Deak650ad972014-04-18 16:35:02 +03002120int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2121{
2122 u32 val;
2123 int err;
2124
Imre Deak650ad972014-04-18 16:35:02 +03002125 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2126 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2127 if (force_on)
2128 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2129 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2130
2131 if (!force_on)
2132 return 0;
2133
Chris Wilsonc6ddc5f2016-06-30 15:32:46 +01002134 err = intel_wait_for_register(dev_priv,
2135 VLV_GTLC_SURVIVABILITY_REG,
2136 VLV_GFX_CLK_STATUS_BIT,
2137 VLV_GFX_CLK_STATUS_BIT,
2138 20);
Imre Deak650ad972014-04-18 16:35:02 +03002139 if (err)
2140 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2141 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2142
2143 return err;
Imre Deak650ad972014-04-18 16:35:02 +03002144}
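/*
 * vlv_force_gfx_clock() does a read-modify-write and then waits for a
 * status bit.  A minimal sketch of that wait, in the spirit of
 * intel_wait_for_register(): poll until (value & mask) == expected or the
 * retry budget runs out.  The accessor and the relax hook are hypothetical;
 * the real helper takes a timeout in milliseconds rather than a count.
 */
static int sketch_wait_for_bits(unsigned int (*read32)(unsigned int),
				void (*relax)(void),
				unsigned int offset, unsigned int mask,
				unsigned int expected, int retries)
{
	while (retries--) {
		if ((read32(offset) & mask) == expected)
			return 0;
		relax();	/* sleep or back off between polls */
	}

	return -1;	/* timed out; the caller logs and decides how hard to fail */
}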
2145
Imre Deakddeea5b2014-05-05 15:19:56 +03002146static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2147{
2148 u32 val;
2149 int err = 0;
2150
2151 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2152 val &= ~VLV_GTLC_ALLOWWAKEREQ;
2153 if (allow)
2154 val |= VLV_GTLC_ALLOWWAKEREQ;
2155 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2156 POSTING_READ(VLV_GTLC_WAKE_CTRL);
2157
Chris Wilsonb2736692016-06-30 15:32:47 +01002158 err = intel_wait_for_register(dev_priv,
2159 VLV_GTLC_PW_STATUS,
2160 VLV_GTLC_ALLOWWAKEACK,
2161 allow,
2162 1);
Imre Deakddeea5b2014-05-05 15:19:56 +03002163 if (err)
2164 DRM_ERROR("timeout disabling GT waking\n");
Chris Wilsonb2736692016-06-30 15:32:47 +01002165
Imre Deakddeea5b2014-05-05 15:19:56 +03002166 return err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002167}
2168
2169static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2170 bool wait_for_on)
2171{
2172 u32 mask;
2173 u32 val;
2174 int err;
2175
2176 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2177 val = wait_for_on ? mask : 0;
Chris Wilson41ce4052016-06-30 15:32:48 +01002178 if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
Imre Deakddeea5b2014-05-05 15:19:56 +03002179 return 0;
2180
2181 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02002182 onoff(wait_for_on),
2183 I915_READ(VLV_GTLC_PW_STATUS));
Imre Deakddeea5b2014-05-05 15:19:56 +03002184
2185 /*
2186 * RC6 transitioning can be delayed up to 2 msec (see
2187 * valleyview_enable_rps), use 3 msec for safety.
2188 */
Chris Wilson41ce4052016-06-30 15:32:48 +01002189 err = intel_wait_for_register(dev_priv,
2190 VLV_GTLC_PW_STATUS, mask, val,
2191 3);
Imre Deakddeea5b2014-05-05 15:19:56 +03002192 if (err)
2193 DRM_ERROR("timeout waiting for GT wells to go %s\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02002194 onoff(wait_for_on));
Imre Deakddeea5b2014-05-05 15:19:56 +03002195
2196 return err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002197}
2198
2199static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2200{
2201 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2202 return;
2203
Daniel Vetter6fa283b2016-01-19 21:00:56 +01002204 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
Imre Deakddeea5b2014-05-05 15:19:56 +03002205 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2206}
2207
Sagar Kambleebc32822014-08-13 23:07:05 +05302208static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
Imre Deakddeea5b2014-05-05 15:19:56 +03002209{
2210 u32 mask;
2211 int err;
2212
2213 /*
2214 * Bspec defines the following GT well-on flags as debug only, so
2215 * don't treat them as hard failures.
2216 */
2217 (void)vlv_wait_for_gt_wells(dev_priv, false);
2218
2219 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2220 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2221
2222 vlv_check_no_gt_access(dev_priv);
2223
2224 err = vlv_force_gfx_clock(dev_priv, true);
2225 if (err)
2226 goto err1;
2227
2228 err = vlv_allow_gt_wake(dev_priv, false);
2229 if (err)
2230 goto err2;
Deepak S98711162014-12-12 14:18:16 +05302231
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002232 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302233 vlv_save_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002234
2235 err = vlv_force_gfx_clock(dev_priv, false);
2236 if (err)
2237 goto err2;
2238
2239 return 0;
2240
2241err2:
2242 /* For safety always re-enable waking and disable gfx clock forcing */
2243 vlv_allow_gt_wake(dev_priv, true);
2244err1:
2245 vlv_force_gfx_clock(dev_priv, false);
2246
2247 return err;
2248}
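/*
 * vlv_suspend_complete() uses the usual kernel unwind idiom: every step
 * that acquires state has a label that undoes it, and error paths jump to
 * the deepest label reached so far.  A stripped-down sketch with
 * hypothetical acquire/release callbacks:
 */
static int sketch_unwind_example(int (*acquire_a)(void),
				 int (*acquire_b)(void),
				 void (*release_a)(void))
{
	int err;

	err = acquire_a();
	if (err)
		goto out;	/* nothing to undo yet */

	err = acquire_b();
	if (err)
		goto err_a;	/* undo step A only */

	return 0;

err_a:
	release_a();
out:
	return err;
}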
2249
Sagar Kamble016970b2014-08-13 23:07:06 +05302250static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2251 bool rpm_resume)
Imre Deakddeea5b2014-05-05 15:19:56 +03002252{
Chris Wilson91c8a322016-07-05 10:40:23 +01002253 struct drm_device *dev = &dev_priv->drm;
Imre Deakddeea5b2014-05-05 15:19:56 +03002254 int err;
2255 int ret;
2256
2257 /*
2258 * If any of the steps fail just try to continue, that's the best we
2259 * can do at this point. Return the first error code (which will also
2260 * leave RPM permanently disabled).
2261 */
2262 ret = vlv_force_gfx_clock(dev_priv, true);
2263
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002264 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302265 vlv_restore_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002266
2267 err = vlv_allow_gt_wake(dev_priv, true);
2268 if (!ret)
2269 ret = err;
2270
2271 err = vlv_force_gfx_clock(dev_priv, false);
2272 if (!ret)
2273 ret = err;
2274
2275 vlv_check_no_gt_access(dev_priv);
2276
Sagar Kamble016970b2014-08-13 23:07:06 +05302277 if (rpm_resume) {
2278 intel_init_clock_gating(dev);
2279 i915_gem_restore_fences(dev);
2280 }
Imre Deakddeea5b2014-05-05 15:19:56 +03002281
2282 return ret;
2283}
2284
David Weinehallc49d13e2016-08-22 13:32:42 +03002285static int intel_runtime_suspend(struct device *kdev)
Paulo Zanoni8a187452013-12-06 20:32:13 -02002286{
David Weinehallc49d13e2016-08-22 13:32:42 +03002287 struct pci_dev *pdev = to_pci_dev(kdev);
Paulo Zanoni8a187452013-12-06 20:32:13 -02002288 struct drm_device *dev = pci_get_drvdata(pdev);
Chris Wilsonfac5e232016-07-04 11:34:36 +01002289 struct drm_i915_private *dev_priv = to_i915(dev);
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002290 int ret;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002291
Chris Wilsondc979972016-05-10 14:10:04 +01002292 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
Imre Deakc6df39b2014-04-14 20:24:29 +03002293 return -ENODEV;
2294
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01002295 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
Imre Deak604effb2014-08-26 13:26:56 +03002296 return -ENODEV;
2297
Paulo Zanoni8a187452013-12-06 20:32:13 -02002298 DRM_DEBUG_KMS("Suspending device\n");
2299
Imre Deak9486db62014-04-22 20:21:07 +03002300 /*
Imre Deakd6102972014-05-07 19:57:49 +03002301 * We could deadlock here in case another thread holding struct_mutex
2302 * calls RPM suspend concurrently, since the RPM suspend will wait
2303 * first for this RPM suspend to finish. In this case the concurrent
2304 * RPM resume will be followed by its RPM suspend counterpart. Still
2305 * for consistency return -EAGAIN, which will reschedule this suspend.
2306 */
2307 if (!mutex_trylock(&dev->struct_mutex)) {
2308 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
2309 /*
2310 * Bump the expiration timestamp, otherwise the suspend won't
2311 * be rescheduled.
2312 */
David Weinehallc49d13e2016-08-22 13:32:42 +03002313 pm_runtime_mark_last_busy(kdev);
Imre Deakd6102972014-05-07 19:57:49 +03002314
2315 return -EAGAIN;
2316 }
Imre Deak1f814da2015-12-16 02:52:19 +02002317
2318 disable_rpm_wakeref_asserts(dev_priv);
2319
Imre Deakd6102972014-05-07 19:57:49 +03002320 /*
2321 * We are safe here against re-faults, since the fault handler takes
2322 * an RPM reference.
2323 */
2324 i915_gem_release_all_mmaps(dev_priv);
2325 mutex_unlock(&dev->struct_mutex);
2326
Alex Daia1c41992015-09-30 09:46:37 -07002327 intel_guc_suspend(dev);
2328
Imre Deak2eb52522014-11-19 15:30:05 +02002329 intel_runtime_pm_disable_interrupts(dev_priv);
Imre Deakb5478bc2014-04-14 20:24:37 +03002330
Imre Deak507e1262016-04-20 20:27:54 +03002331 ret = 0;
2332 if (IS_BROXTON(dev_priv)) {
2333 bxt_display_core_uninit(dev_priv);
2334 bxt_enable_dc9(dev_priv);
2335 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2336 hsw_enable_pc8(dev_priv);
2337 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2338 ret = vlv_suspend_complete(dev_priv);
2339 }
2340
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002341 if (ret) {
2342 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
Daniel Vetterb9632912014-09-30 10:56:44 +02002343 intel_runtime_pm_enable_interrupts(dev_priv);
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002344
Imre Deak1f814da2015-12-16 02:52:19 +02002345 enable_rpm_wakeref_asserts(dev_priv);
2346
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002347 return ret;
2348 }
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03002349
Chris Wilsondc979972016-05-10 14:10:04 +01002350 intel_uncore_forcewake_reset(dev_priv, false);
Imre Deak1f814da2015-12-16 02:52:19 +02002351
2352 enable_rpm_wakeref_asserts(dev_priv);
2353 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002354
Mika Kuoppalabc3b9342016-01-08 15:51:20 +02002355 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002356 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2357
Paulo Zanoni8a187452013-12-06 20:32:13 -02002358 dev_priv->pm.suspended = true;
Kristen Carlson Accardi1fb23622014-01-14 15:36:15 -08002359
2360 /*
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002361 * FIXME: We really should find a document that references the arguments
2362 * used below!
Kristen Carlson Accardi1fb23622014-01-14 15:36:15 -08002363 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002364 if (IS_BROADWELL(dev_priv)) {
Paulo Zanonid37ae192015-07-30 18:20:29 -03002365 /*
2366 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2367 * being detected, and the call we do at intel_runtime_resume()
2368 * won't be able to restore them. Since PCI_D3hot matches the
2369 * actual specification and appears to be working, use it.
2370 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002371 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
Paulo Zanonid37ae192015-07-30 18:20:29 -03002372 } else {
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002373 /*
2374 * current versions of firmware which depend on this opregion
2375 * notification have repurposed the D1 definition to mean
2376 * "runtime suspended" vs. what you would normally expect (D3)
2377 * to distinguish it from notifications that might be sent via
2378 * the suspend path.
2379 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002380 intel_opregion_notify_adapter(dev_priv, PCI_D1);
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002381 }
Paulo Zanoni8a187452013-12-06 20:32:13 -02002382
Mika Kuoppala59bad942015-01-16 11:34:40 +02002383 assert_forcewakes_inactive(dev_priv);
Chris Wilsondc9fb092015-01-16 11:34:34 +02002384
Lyude19625e82016-06-21 17:03:44 -04002385 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2386 intel_hpd_poll_init(dev_priv);
2387
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03002388 DRM_DEBUG_KMS("Device suspended\n");
Paulo Zanoni8a187452013-12-06 20:32:13 -02002389 return 0;
2390}
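/*
 * Sketch of the deadlock-avoidance pattern at the top of
 * intel_runtime_suspend(): if the lock is contended, refresh the
 * "last busy" timestamp and return a retry hint so the PM core reschedules
 * the suspend instead of blocking.  The callbacks are stand-ins for
 * mutex_trylock(), mutex_unlock() and pm_runtime_mark_last_busy().
 */
static int sketch_runtime_suspend(int (*trylock)(void), void (*unlock)(void),
				  void (*mark_last_busy)(void),
				  int (*do_suspend)(void))
{
	int ret;

	if (!trylock()) {
		mark_last_busy();	/* push the autosuspend deadline out */
		return -1;		/* the driver returns -EAGAIN here */
	}

	ret = do_suspend();
	unlock();

	return ret;
}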
2391
David Weinehallc49d13e2016-08-22 13:32:42 +03002392static int intel_runtime_resume(struct device *kdev)
Paulo Zanoni8a187452013-12-06 20:32:13 -02002393{
David Weinehallc49d13e2016-08-22 13:32:42 +03002394 struct pci_dev *pdev = to_pci_dev(kdev);
Paulo Zanoni8a187452013-12-06 20:32:13 -02002395 struct drm_device *dev = pci_get_drvdata(pdev);
Chris Wilsonfac5e232016-07-04 11:34:36 +01002396 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002397 int ret = 0;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002398
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01002399 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
Imre Deak604effb2014-08-26 13:26:56 +03002400 return -ENODEV;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002401
2402 DRM_DEBUG_KMS("Resuming device\n");
2403
Imre Deak1f814da2015-12-16 02:52:19 +02002404 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
2405 disable_rpm_wakeref_asserts(dev_priv);
2406
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002407 intel_opregion_notify_adapter(dev_priv, PCI_D0);
Paulo Zanoni8a187452013-12-06 20:32:13 -02002408 dev_priv->pm.suspended = false;
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002409 if (intel_uncore_unclaimed_mmio(dev_priv))
2410 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
Paulo Zanoni8a187452013-12-06 20:32:13 -02002411
Alex Daia1c41992015-09-30 09:46:37 -07002412 intel_guc_resume(dev);
2413
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002414 if (IS_GEN6(dev_priv))
2415 intel_init_pch_refclk(dev);
Suketu Shah31335ce2014-11-24 13:37:45 +05302416
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +01002417 if (IS_BROXTON(dev_priv)) {
Imre Deak507e1262016-04-20 20:27:54 +03002418 bxt_disable_dc9(dev_priv);
2419 bxt_display_core_init(dev_priv, true);
Imre Deakf62c79b2016-04-20 20:27:57 +03002420 if (dev_priv->csr.dmc_payload &&
2421 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2422 gen9_enable_dc5(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03002423 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002424 hsw_disable_pc8(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03002425 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002426 ret = vlv_resume_prepare(dev_priv, true);
Imre Deak507e1262016-04-20 20:27:54 +03002427 }
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002428
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002429 /*
2430 * No point in rolling things back in case of an error, as the best
2431 * we can do is to hope that things will still work (and disable RPM).
2432 */
Imre Deak92b806d2014-04-14 20:24:39 +03002433 i915_gem_init_swizzling(dev);
Imre Deak92b806d2014-04-14 20:24:39 +03002434
Daniel Vetterb9632912014-09-30 10:56:44 +02002435 intel_runtime_pm_enable_interrupts(dev_priv);
Ville Syrjälä08d8a232015-08-27 23:56:08 +03002436
2437 /*
2438 * On VLV/CHV display interrupts are part of the display
2439 * power well, so hpd is reinitialized from there. For
2440 * everyone else do it here.
2441 */
Wayne Boyer666a4532015-12-09 12:29:35 -08002442 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Ville Syrjälä08d8a232015-08-27 23:56:08 +03002443 intel_hpd_init(dev_priv);
2444
Imre Deak1f814da2015-12-16 02:52:19 +02002445 enable_rpm_wakeref_asserts(dev_priv);
2446
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002447 if (ret)
2448 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
2449 else
2450 DRM_DEBUG_KMS("Device resumed\n");
2451
2452 return ret;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002453}
2454
Chris Wilson42f55512016-06-24 14:00:26 +01002455const struct dev_pm_ops i915_pm_ops = {
Imre Deak5545dbb2014-10-23 19:23:28 +03002456 /*
2457 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
2458 * PMSG_RESUME]
2459 */
Akshay Joshi0206e352011-08-16 15:34:10 -04002460 .suspend = i915_pm_suspend,
Imre Deak76c4b252014-04-01 19:55:22 +03002461 .suspend_late = i915_pm_suspend_late,
2462 .resume_early = i915_pm_resume_early,
Akshay Joshi0206e352011-08-16 15:34:10 -04002463 .resume = i915_pm_resume,
Imre Deak5545dbb2014-10-23 19:23:28 +03002464
2465 /*
2466 * S4 event handlers
2467 * @freeze, @freeze_late : called (1) before creating the
2468 * hibernation image [PMSG_FREEZE] and
2469 * (2) after rebooting, before restoring
2470 * the image [PMSG_QUIESCE]
2471 * @thaw, @thaw_early : called (1) after creating the hibernation
2472 * image, before writing it [PMSG_THAW]
2473 * and (2) after failing to create or
2474 * restore the image [PMSG_RECOVER]
2475 * @poweroff, @poweroff_late: called after writing the hibernation
2476 * image, before rebooting [PMSG_HIBERNATE]
2477 * @restore, @restore_early : called after rebooting and restoring the
2478 * hibernation image [PMSG_RESTORE]
2479 */
Chris Wilson1f19ac22016-05-14 07:26:32 +01002480 .freeze = i915_pm_freeze,
2481 .freeze_late = i915_pm_freeze_late,
2482 .thaw_early = i915_pm_thaw_early,
2483 .thaw = i915_pm_thaw,
Imre Deak36d61e62014-10-23 19:23:24 +03002484 .poweroff = i915_pm_suspend,
Imre Deakab3be732015-03-02 13:04:41 +02002485 .poweroff_late = i915_pm_poweroff_late,
Chris Wilson1f19ac22016-05-14 07:26:32 +01002486 .restore_early = i915_pm_restore_early,
2487 .restore = i915_pm_restore,
Imre Deak5545dbb2014-10-23 19:23:28 +03002488
2489 /* S0ix (via runtime suspend) event handlers */
Paulo Zanoni97bea202014-03-07 20:12:33 -03002490 .runtime_suspend = intel_runtime_suspend,
2491 .runtime_resume = intel_runtime_resume,
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002492};
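/*
 * The comment block in i915_pm_ops spells out how the hibernation phases
 * reuse the suspend/resume helpers.  A hedged sketch of that mapping as a
 * phase -> handler table; the handlers are placeholders, not the i915_pm_*
 * callbacks themselves.
 */
enum sketch_pm_phase {
	SKETCH_PM_FREEZE,	/* before creating the image */
	SKETCH_PM_THAW,		/* after writing (or failing to write) it */
	SKETCH_PM_POWEROFF,	/* after writing it, before reboot */
	SKETCH_PM_RESTORE,	/* after loading it on the next boot */
	SKETCH_PM_NR_PHASES
};

static int sketch_suspend_path(void) { return 0; }
static int sketch_resume_path(void) { return 0; }

static int (* const sketch_hibernate_ops[SKETCH_PM_NR_PHASES])(void) = {
	[SKETCH_PM_FREEZE]	= sketch_suspend_path,
	[SKETCH_PM_THAW]	= sketch_resume_path,
	[SKETCH_PM_POWEROFF]	= sketch_suspend_path,
	[SKETCH_PM_RESTORE]	= sketch_resume_path,
};

static int sketch_pm_dispatch(enum sketch_pm_phase phase)
{
	return sketch_hibernate_ops[phase]();
}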
2493
Laurent Pinchart78b68552012-05-17 13:27:22 +02002494static const struct vm_operations_struct i915_gem_vm_ops = {
Jesse Barnesde151cf2008-11-12 10:03:55 -08002495 .fault = i915_gem_fault,
Jesse Barnesab00b3e2009-02-11 14:01:46 -08002496 .open = drm_gem_vm_open,
2497 .close = drm_gem_vm_close,
Jesse Barnesde151cf2008-11-12 10:03:55 -08002498};
2499
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002500static const struct file_operations i915_driver_fops = {
2501 .owner = THIS_MODULE,
2502 .open = drm_open,
2503 .release = drm_release,
2504 .unlocked_ioctl = drm_ioctl,
2505 .mmap = drm_gem_mmap,
2506 .poll = drm_poll,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002507 .read = drm_read,
2508#ifdef CONFIG_COMPAT
2509 .compat_ioctl = i915_compat_ioctl,
2510#endif
2511 .llseek = noop_llseek,
2512};
2513
Chris Wilson0673ad42016-06-24 14:00:22 +01002514static int
2515i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2516 struct drm_file *file)
2517{
2518 return -ENODEV;
2519}
2520
2521static const struct drm_ioctl_desc i915_ioctls[] = {
2522 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2523 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2524 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2525 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2526 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2527 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2528 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2529 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2530 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2531 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2532 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2533 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2534 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2535 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2536 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2537 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2538 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2539 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2540 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
2541 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
2542 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2543 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2544 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2545 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2546 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2547 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2548 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2549 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2550 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2551 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2552 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2553 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2554 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2555 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2556 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2557 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
2558 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
2559 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2560 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
2561 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2562 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2563 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2564 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
2565 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
2566 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2567 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2568 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2569 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2570 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2571 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2572 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2573 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2574};
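/*
 * Each entry in the ioctl table above pairs a handler with permission
 * flags that the DRM core checks before the call.  A hedged sketch of that
 * access model; the flag names and caller description are illustrative
 * only, not the DRM core's implementation.
 */
#define SKETCH_F_AUTH		0x1	/* caller must be authenticated */
#define SKETCH_F_MASTER		0x2	/* caller must be the DRM master */
#define SKETCH_F_RENDER_OK	0x4	/* allowed on render nodes */

struct sketch_ioctl {
	unsigned int	flags;
	int		(*handler)(void *data);
};

struct sketch_caller {
	unsigned int	authenticated : 1;
	unsigned int	is_master : 1;
	unsigned int	on_render_node : 1;
};

static int sketch_ioctl_dispatch(const struct sketch_ioctl *entry,
				 const struct sketch_caller *caller,
				 void *data)
{
	if ((entry->flags & SKETCH_F_AUTH) && !caller->authenticated)
		return -1;	/* the real dispatcher returns -EACCES */
	if ((entry->flags & SKETCH_F_MASTER) && !caller->is_master)
		return -1;
	if (caller->on_render_node && !(entry->flags & SKETCH_F_RENDER_OK))
		return -1;

	return entry->handler(data);
}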
2575
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576static struct drm_driver driver = {
Michael Witten0c547812011-08-25 17:55:54 +00002577 /* Don't use MTRRs here; the Xserver or userspace app should
2578 * deal with them for Intel hardware.
Dave Airlie792d2b92005-11-11 23:30:27 +11002579 */
Eric Anholt673a3942008-07-30 12:06:12 -07002580 .driver_features =
Kristian Høgsberg10ba5012013-08-25 18:29:01 +02002581 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
Maarten Lankhorst1751fcf2015-08-27 15:15:15 +02002582 DRIVER_RENDER | DRIVER_MODESET,
Eric Anholt673a3942008-07-30 12:06:12 -07002583 .open = i915_driver_open,
Dave Airlie22eae942005-11-10 22:16:34 +11002584 .lastclose = i915_driver_lastclose,
2585 .preclose = i915_driver_preclose,
Eric Anholt673a3942008-07-30 12:06:12 -07002586 .postclose = i915_driver_postclose,
David Herrmann915b4d12014-08-29 12:12:43 +02002587 .set_busid = drm_pci_set_busid,
Rafael J. Wysockid8e29202010-01-09 00:45:33 +01002588
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002589 .gem_close_object = i915_gem_close_object,
Eric Anholt673a3942008-07-30 12:06:12 -07002590 .gem_free_object = i915_gem_free_object,
Jesse Barnesde151cf2008-11-12 10:03:55 -08002591 .gem_vm_ops = &i915_gem_vm_ops,
Daniel Vetter1286ff72012-05-10 15:25:09 +02002592
2593 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
2594 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
2595 .gem_prime_export = i915_gem_prime_export,
2596 .gem_prime_import = i915_gem_prime_import,
2597
Dave Airlieff72145b2011-02-07 12:16:14 +10002598 .dumb_create = i915_gem_dumb_create,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002599 .dumb_map_offset = i915_gem_mmap_gtt,
Daniel Vetter43387b32013-07-16 09:12:04 +02002600 .dumb_destroy = drm_gem_dumb_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 .ioctls = i915_ioctls,
Chris Wilson0673ad42016-06-24 14:00:22 +01002602 .num_ioctls = ARRAY_SIZE(i915_ioctls),
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002603 .fops = &i915_driver_fops,
Dave Airlie22eae942005-11-10 22:16:34 +11002604 .name = DRIVER_NAME,
2605 .desc = DRIVER_DESC,
2606 .date = DRIVER_DATE,
2607 .major = DRIVER_MAJOR,
2608 .minor = DRIVER_MINOR,
2609 .patchlevel = DRIVER_PATCHLEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610};