/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

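/*
 * Load-failure injection: with CONFIG_DRM_I915_DEBUG enabled, the
 * i915.inject_load_failure module parameter selects which numbered
 * checkpoint of driver initialisation should report a failure, so the
 * error-unwind paths can be exercised without a real hardware fault.
 */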
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		i915_modparams.inject_load_failure = 0;
		return true;
	}

	return false;
}

bool i915_error_injected(void)
{
	return i915_load_fail_count && !i915_modparams.inject_load_failure;
}

#endif

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

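/*
 * __i915_printk() routes driver messages through dev_printk(); debug
 * messages are suppressed unless drm.debug enables the DRIVER category,
 * and the first error-level message additionally asks the user to file a
 * bug report, unless the kernel is already tainted by unsafe tweaking.
 */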
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN5(dev_priv));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		return PCH_KBP;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	default:
		return PCH_NONE;
	}
}

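/*
 * intel_is_virt_pch() recognises the PCH ids that hypervisors expose to
 * guests (P2X/P3X, and the QEMU south bridge identified by its Red Hat
 * subsystem ids), in which case a plausible PCH has to be guessed instead.
 */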
static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
	else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					     pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

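/*
 * DRM_IOCTL_I915_GETPARAM: report a single integer capability or limit
 * (engine presence, PPGTT support, fence register count, timestamp
 * frequency, ...) back to userspace via param->value.
 */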
static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = HAS_LEGACY_SEMAPHORES(dev_priv);
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

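/*
 * Cache the PCI host bridge (bus 0, device 0, function 0 in the GPU's PCI
 * domain); it is needed later for MCHBAR setup via config-space accesses.
 */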
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/*
 * Setup MCHBAR if possible; mchbar_need_disable records whether we have to
 * disable it again on teardown.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

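/*
 * vga_switcheroo support: on dual-GPU (hybrid) systems these callbacks let
 * the mux handler power the device up or down through the normal
 * suspend/resume paths, and report whether switching away is currently safe.
 */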
static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

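/*
 * i915_load_modeset_init() brings up everything needed for kernel modesetting:
 * VBT parsing, VGA arbitration and switcheroo registration, display power
 * domains, CSR/DMC firmware, interrupts, GMBUS, the modeset core, GEM and
 * finally fbdev/hotplug. Failures unwind the completed steps in reverse order.
 */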
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_setup_overlay(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_modeset:
	intel_modeset_cleanup(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini_hw(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

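/*
 * Evict generic firmware framebuffer drivers (efifb, vesafb, ...) from the
 * GGTT aperture and, where possible, replace the VGA console, so that they
 * cannot touch the hardware once i915 owns it.
 */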
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_wopcm_init_early(&dev_priv->wopcm);
	intel_uc_init_early(dev_priv);
	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_uc;
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_uc:
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

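/*
 * i915_mmio_setup() maps the register BAR (BAR1 on gen2, BAR0 otherwise)
 * with a size that stops short of the GTT half of the BAR on gen4+, and
 * makes sure MCHBAR is enabled before anything pokes at it.
 */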
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915_modparams.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv,
					    i915_modparams.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));

	intel_sanitize_options(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

1197/**
1198 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1199 * @dev_priv: device private
1200 */
1201static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1202{
David Weinehall52a05c32016-08-22 13:32:44 +03001203 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001204
Lionel Landwerlin9f9b2792017-10-27 15:59:31 +01001205 i915_perf_fini(dev_priv);
1206
David Weinehall52a05c32016-08-22 13:32:44 +03001207 if (pdev->msi_enabled)
1208 pci_disable_msi(pdev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001209
1210 pm_qos_remove_request(&dev_priv->pm_qos);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001211 i915_ggtt_cleanup_hw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001212}
1213
1214/**
1215 * i915_driver_register - register the driver with the rest of the system
1216 * @dev_priv: device private
1217 *
1218 * Perform any steps necessary to make the driver available via kernel
1219 * internal or userspace interfaces.
1220 */
1221static void i915_driver_register(struct drm_i915_private *dev_priv)
1222{
Chris Wilson91c8a322016-07-05 10:40:23 +01001223 struct drm_device *dev = &dev_priv->drm;
Chris Wilson0673ad42016-06-24 14:00:22 +01001224
Chris Wilson848b3652017-11-23 11:53:37 +00001225 i915_gem_shrinker_register(dev_priv);
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001226 i915_pmu_register(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001227
1228 /*
1229 * Notify a valid surface after modesetting,
1230 * when running inside a VM.
1231 */
1232 if (intel_vgpu_active(dev_priv))
1233 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1234
1235 /* Reveal our presence to userspace */
1236 if (drm_dev_register(dev, 0) == 0) {
1237 i915_debugfs_register(dev_priv);
David Weinehall694c2822016-08-22 13:32:43 +03001238 i915_setup_sysfs(dev_priv);
Robert Bragg442b8c02016-11-07 19:49:53 +00001239
1240 /* Depends on sysfs having been initialized */
1241 i915_perf_register(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001242 } else
1243 DRM_ERROR("Failed to register driver for userspace access!\n");
1244
1245 if (INTEL_INFO(dev_priv)->num_pipes) {
1246 /* Must be done after probing outputs */
1247 intel_opregion_register(dev_priv);
1248 acpi_video_register();
1249 }
1250
1251 if (IS_GEN5(dev_priv))
1252 intel_gpu_ips_init(dev_priv);
1253
Jerome Anandeef57322017-01-25 04:27:49 +05301254 intel_audio_init(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001255
1256 /*
1257 * Some ports require correctly set-up hpd registers for detection to
1258 * work properly (leading to ghost connected connector status), e.g. VGA
1259 * on gm45. Hence we can only set up the initial fbdev config after hpd
1260 * irqs are fully enabled. We do it last so that the async config
1261 * cannot run before the connectors are registered.
1262 */
1263 intel_fbdev_initial_config_async(dev);
Chris Wilson448aa912017-11-28 11:01:47 +00001264
1265 /*
1266 * We need to coordinate the hotplugs with the asynchronous fbdev
1267 * configuration, for which we use the fbdev->async_cookie.
1268 */
1269 if (INTEL_INFO(dev_priv)->num_pipes)
1270 drm_kms_helper_poll_init(dev);
Chris Wilson07d80572018-08-16 15:37:56 +03001271
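/*
 * Everything is registered; from here on the device is allowed to
 * power down again via display power domains and runtime PM.
 */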
Imre Deak2cd9a682018-08-16 15:37:57 +03001272 intel_power_domains_enable(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001273 intel_runtime_pm_enable(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001274}
1275
1276/**
1277 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser()
1278 * @dev_priv: device private
1279 */
1280static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1281{
Chris Wilson07d80572018-08-16 15:37:56 +03001282 intel_runtime_pm_disable(dev_priv);
Imre Deak2cd9a682018-08-16 15:37:57 +03001283 intel_power_domains_disable(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001284
Daniel Vetter4f256d82017-07-15 00:46:55 +02001285 intel_fbdev_unregister(dev_priv);
Jerome Anandeef57322017-01-25 04:27:49 +05301286 intel_audio_deinit(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001287
Chris Wilson448aa912017-11-28 11:01:47 +00001288 /*
1289 * After flushing the fbdev (incl. a late async config which will
1290 * have delayed queuing of a hotplug event), then flush the hotplug
1291 * events.
1292 */
1293 drm_kms_helper_poll_fini(&dev_priv->drm);
1294
Chris Wilson0673ad42016-06-24 14:00:22 +01001295 intel_gpu_ips_teardown();
1296 acpi_video_unregister();
1297 intel_opregion_unregister(dev_priv);
1298
Robert Bragg442b8c02016-11-07 19:49:53 +00001299 i915_perf_unregister(dev_priv);
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001300 i915_pmu_unregister(dev_priv);
Robert Bragg442b8c02016-11-07 19:49:53 +00001301
David Weinehall694c2822016-08-22 13:32:43 +03001302 i915_teardown_sysfs(dev_priv);
Chris Wilson91c8a322016-07-05 10:40:23 +01001303 drm_dev_unregister(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001304
Chris Wilson848b3652017-11-23 11:53:37 +00001305 i915_gem_shrinker_unregister(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001306}
1307
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001308static void i915_welcome_messages(struct drm_i915_private *dev_priv)
1309{
1310 if (drm_debug & DRM_UT_DRIVER) {
1311 struct drm_printer p = drm_debug_printer("i915 device info:");
1312
1313 intel_device_info_dump(&dev_priv->info, &p);
1314 intel_device_info_dump_runtime(&dev_priv->info, &p);
1315 }
1316
1317 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1318 DRM_INFO("DRM_I915_DEBUG enabled\n");
1319 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1320 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
Imre Deak6dfc4a82018-08-16 22:34:14 +03001321 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1322 DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001323}
1324
Chris Wilson55ac5a12018-09-05 15:09:20 +01001325static struct drm_i915_private *
1326i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
1327{
1328 const struct intel_device_info *match_info =
1329 (struct intel_device_info *)ent->driver_data;
1330 struct intel_device_info *device_info;
1331 struct drm_i915_private *i915;
1332
1333 i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
1334 if (!i915)
1335 return NULL;
1336
1337 if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
1338 kfree(i915);
1339 return NULL;
1340 }
1341
1342 i915->drm.pdev = pdev;
1343 i915->drm.dev_private = i915;
1344 pci_set_drvdata(pdev, &i915->drm);
1345
1346 /* Setup the write-once "constant" device info */
1347 device_info = mkwrite_device_info(i915);
1348 memcpy(device_info, match_info, sizeof(*device_info));
1349 device_info->device_id = pdev->device;
1350
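/* Sanity check that the platform and gen bitmask fields are wide enough. */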
1351 BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
1352 sizeof(device_info->platform_mask) * BITS_PER_BYTE);
1353 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1354
1355 return i915;
1356}
1357
Chris Wilson31962ca2018-09-05 15:09:21 +01001358static void i915_driver_destroy(struct drm_i915_private *i915)
1359{
1360 struct pci_dev *pdev = i915->drm.pdev;
1361
1362 drm_dev_fini(&i915->drm);
1363 kfree(i915);
1364
1365 /* And make sure we never chase our dangling pointer from pci_dev */
1366 pci_set_drvdata(pdev, NULL);
1367}
1368
Chris Wilson0673ad42016-06-24 14:00:22 +01001369/**
1370 * i915_driver_load - setup chip and create an initial config
Joonas Lahtinend2ad3ae2016-11-10 15:36:34 +02001371 * @pdev: PCI device
1372 * @ent: matching PCI ID entry
Chris Wilson0673ad42016-06-24 14:00:22 +01001373 *
1374 * The driver load routine has to do several things:
1375 * - drive output discovery via intel_modeset_init()
1376 * - initialize the memory manager
1377 * - allocate initial config memory
1378 * - setup the DRM framebuffer with the allocated memory
1379 */
Chris Wilson42f55512016-06-24 14:00:26 +01001380int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
Chris Wilson0673ad42016-06-24 14:00:22 +01001381{
Maarten Lankhorst8d2b47d2017-02-02 08:41:42 +01001382 const struct intel_device_info *match_info =
1383 (struct intel_device_info *)ent->driver_data;
Chris Wilson0673ad42016-06-24 14:00:22 +01001384 struct drm_i915_private *dev_priv;
1385 int ret;
1386
Ville Syrjäläff4c3b72017-03-03 17:19:28 +02001387 /* Enable nuclear pageflip on ILK+ */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001388 if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
Maarten Lankhorst8d2b47d2017-02-02 08:41:42 +01001389 driver.driver_features &= ~DRIVER_ATOMIC;
Chris Wilsona09d0ba2016-06-24 14:00:27 +01001390
Chris Wilson55ac5a12018-09-05 15:09:20 +01001391 dev_priv = i915_driver_create(pdev, ent);
1392 if (!dev_priv)
1393 return -ENOMEM;
Chris Wilson0673ad42016-06-24 14:00:22 +01001394
1395 ret = pci_enable_device(pdev);
1396 if (ret)
Chris Wilsoncad36882017-02-10 16:35:21 +00001397 goto out_fini;
Chris Wilson0673ad42016-06-24 14:00:22 +01001398
Chris Wilson55ac5a12018-09-05 15:09:20 +01001399 ret = i915_driver_init_early(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001400 if (ret < 0)
1401 goto out_pci_disable;
1402
Imre Deak2cd9a682018-08-16 15:37:57 +03001403 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001404
1405 ret = i915_driver_init_mmio(dev_priv);
1406 if (ret < 0)
1407 goto out_runtime_pm_put;
1408
1409 ret = i915_driver_init_hw(dev_priv);
1410 if (ret < 0)
1411 goto out_cleanup_mmio;
1412
1413 /*
1414 * TODO: move the vblank init and parts of modeset init steps into one
1415 * of the i915_driver_init_/i915_driver_register functions according
1416 * to the role/effect of the given init step.
1417 */
1418 if (INTEL_INFO(dev_priv)->num_pipes) {
Chris Wilson91c8a322016-07-05 10:40:23 +01001419 ret = drm_vblank_init(&dev_priv->drm,
Chris Wilson0673ad42016-06-24 14:00:22 +01001420 INTEL_INFO(dev_priv)->num_pipes);
1421 if (ret)
1422 goto out_cleanup_hw;
1423 }
1424
Chris Wilson91c8a322016-07-05 10:40:23 +01001425 ret = i915_load_modeset_init(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001426 if (ret < 0)
Daniel Vetterbaf54382017-06-21 10:28:41 +02001427 goto out_cleanup_hw;
Chris Wilson0673ad42016-06-24 14:00:22 +01001428
1429 i915_driver_register(dev_priv);
1430
Kumar, Mahesh2503a0f2017-08-17 19:15:28 +05301431 intel_init_ipc(dev_priv);
Mahesh Kumara3a89862016-12-01 21:19:34 +05301432
Imre Deak2cd9a682018-08-16 15:37:57 +03001433 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001434
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001435 i915_welcome_messages(dev_priv);
1436
Chris Wilson0673ad42016-06-24 14:00:22 +01001437 return 0;
1438
Chris Wilson0673ad42016-06-24 14:00:22 +01001439out_cleanup_hw:
1440 i915_driver_cleanup_hw(dev_priv);
1441out_cleanup_mmio:
1442 i915_driver_cleanup_mmio(dev_priv);
1443out_runtime_pm_put:
Imre Deak2cd9a682018-08-16 15:37:57 +03001444 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001445 i915_driver_cleanup_early(dev_priv);
1446out_pci_disable:
1447 pci_disable_device(pdev);
Chris Wilsoncad36882017-02-10 16:35:21 +00001448out_fini:
Chris Wilson0673ad42016-06-24 14:00:22 +01001449 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
Chris Wilson31962ca2018-09-05 15:09:21 +01001450 i915_driver_destroy(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001451 return ret;
1452}
1453
Chris Wilson42f55512016-06-24 14:00:26 +01001454void i915_driver_unload(struct drm_device *dev)
Chris Wilson0673ad42016-06-24 14:00:22 +01001455{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001456 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001457 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001458
Imre Deak2cd9a682018-08-16 15:37:57 +03001459 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001460
Daniel Vetter99c539b2017-07-15 00:46:56 +02001461 i915_driver_unregister(dev_priv);
1462
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001463 if (i915_gem_suspend(dev_priv))
Chris Wilson42f55512016-06-24 14:00:26 +01001464 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
Chris Wilson0673ad42016-06-24 14:00:22 +01001465
Daniel Vetter18dddad2017-03-21 17:41:49 +01001466 drm_atomic_helper_shutdown(dev);
Maarten Lankhorsta667fb42016-12-15 15:29:44 +01001467
Zhenyu Wang26f837e2017-01-13 10:46:09 +08001468 intel_gvt_cleanup(dev_priv);
1469
Chris Wilson0673ad42016-06-24 14:00:22 +01001470 intel_modeset_cleanup(dev);
1471
Hans de Goede785f0762018-02-14 09:21:49 +01001472 intel_bios_cleanup(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001473
David Weinehall52a05c32016-08-22 13:32:44 +03001474 vga_switcheroo_unregister_client(pdev);
1475 vga_client_register(pdev, NULL, NULL, NULL);
Chris Wilson0673ad42016-06-24 14:00:22 +01001476
1477 intel_csr_ucode_fini(dev_priv);
1478
1479 /* Free error state after interrupts are fully disabled. */
1480 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001481 i915_reset_error_state(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001482
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01001483 i915_gem_fini(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001484 intel_fbc_cleanup_cfb(dev_priv);
1485
Imre Deak48a287e2018-08-06 12:58:35 +03001486 intel_power_domains_fini_hw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001487
1488 i915_driver_cleanup_hw(dev_priv);
1489 i915_driver_cleanup_mmio(dev_priv);
1490
Imre Deak2cd9a682018-08-16 15:37:57 +03001491 enable_rpm_wakeref_asserts(dev_priv);
1492
Chris Wilson07d80572018-08-16 15:37:56 +03001493 WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
Chris Wilsoncad36882017-02-10 16:35:21 +00001494}
1495
1496static void i915_driver_release(struct drm_device *dev)
1497{
1498 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001499
1500 i915_driver_cleanup_early(dev_priv);
Chris Wilson31962ca2018-09-05 15:09:21 +01001501 i915_driver_destroy(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001502}
1503
1504static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1505{
Chris Wilson829a0af2017-06-20 12:05:45 +01001506 struct drm_i915_private *i915 = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001507 int ret;
1508
Chris Wilson829a0af2017-06-20 12:05:45 +01001509 ret = i915_gem_open(i915, file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001510 if (ret)
1511 return ret;
1512
1513 return 0;
1514}
1515
1516/**
1517 * i915_driver_lastclose - clean up after all DRM clients have exited
1518 * @dev: DRM device
1519 *
1520 * Take care of cleaning up after all DRM clients have exited. In the
1521 * mode setting case, we want to restore the kernel's initial mode (just
1522 * in case the last client left us in a bad state).
1523 *
1524 * Additionally, in the non-mode setting case, we'll tear down the GTT
 1525 * and DMA structures, since the kernel won't be using them, and clean
1526 * up any GEM state.
1527 */
1528static void i915_driver_lastclose(struct drm_device *dev)
1529{
1530 intel_fbdev_restore_mode(dev);
1531 vga_switcheroo_process_delayed_switch();
1532}
1533
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001534static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
Chris Wilson0673ad42016-06-24 14:00:22 +01001535{
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001536 struct drm_i915_file_private *file_priv = file->driver_priv;
1537
Chris Wilson0673ad42016-06-24 14:00:22 +01001538 mutex_lock(&dev->struct_mutex);
Chris Wilson829a0af2017-06-20 12:05:45 +01001539 i915_gem_context_close(file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001540 i915_gem_release(dev, file);
1541 mutex_unlock(&dev->struct_mutex);
Chris Wilson0673ad42016-06-24 14:00:22 +01001542
1543 kfree(file_priv);
1544}
1545
Imre Deak07f9cd02014-08-18 14:42:45 +03001546static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1547{
Chris Wilson91c8a322016-07-05 10:40:23 +01001548 struct drm_device *dev = &dev_priv->drm;
Jani Nikula19c80542015-12-16 12:48:16 +02001549 struct intel_encoder *encoder;
Imre Deak07f9cd02014-08-18 14:42:45 +03001550
1551 drm_modeset_lock_all(dev);
Jani Nikula19c80542015-12-16 12:48:16 +02001552 for_each_intel_encoder(dev, encoder)
1553 if (encoder->suspend)
1554 encoder->suspend(encoder);
Imre Deak07f9cd02014-08-18 14:42:45 +03001555 drm_modeset_unlock_all(dev);
1556}
1557
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001558static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1559 bool rpm_resume);
Imre Deak507e1262016-04-20 20:27:54 +03001560static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
Suketu Shahf75a1982015-04-16 14:22:11 +05301561
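/*
 * Report whether the pending system sleep transition targets a state
 * shallower than ACPI S3, i.e. suspend-to-idle.
 */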
Imre Deakbc872292015-11-18 17:32:30 +02001562static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1563{
1564#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1565 if (acpi_target_system_state() < ACPI_STATE_S3)
1566 return true;
1567#endif
1568 return false;
1569}
Sagar Kambleebc32822014-08-13 23:07:05 +05301570
Chris Wilson73b66f82018-05-25 10:26:29 +01001571static int i915_drm_prepare(struct drm_device *dev)
1572{
1573 struct drm_i915_private *i915 = to_i915(dev);
1574 int err;
1575
1576 /*
1577 * NB intel_display_suspend() may issue new requests after we've
1578 * ostensibly marked the GPU as ready-to-sleep here. We need to
 1579 * split out that work and pull it forward so that after this point,
1580 * the GPU is not woken again.
1581 */
1582 err = i915_gem_suspend(i915);
1583 if (err)
1584 dev_err(&i915->drm.pdev->dev,
1585 "GEM idle failed, suspend/resume might fail\n");
1586
1587 return err;
1588}
1589
Imre Deak5e365c32014-10-23 19:23:25 +03001590static int i915_drm_suspend(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001591{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001592 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001593 struct pci_dev *pdev = dev_priv->drm.pdev;
Jesse Barnese5747e32014-06-12 08:35:47 -07001594 pci_power_t opregion_target_state;
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001595
Imre Deak1f814da2015-12-16 02:52:19 +02001596 disable_rpm_wakeref_asserts(dev_priv);
1597
Paulo Zanonic67a4702013-08-19 13:18:09 -03001598 /* We do a lot of poking in a lot of registers; make sure they work
1599 * properly. */
Imre Deak2cd9a682018-08-16 15:37:57 +03001600 intel_power_domains_disable(dev_priv);
Paulo Zanonicb107992013-01-25 16:59:15 -02001601
Dave Airlie5bcf7192010-12-07 09:20:40 +10001602 drm_kms_helper_poll_disable(dev);
1603
David Weinehall52a05c32016-08-22 13:32:44 +03001604 pci_save_state(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001605
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02001606 intel_display_suspend(dev);
Daniel Vetterd5818932015-02-23 12:03:26 +01001607
Ville Syrjälä1a4313d2018-07-05 19:43:52 +03001608 intel_dp_mst_suspend(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001609
1610 intel_runtime_pm_disable_interrupts(dev_priv);
1611 intel_hpd_cancel_work(dev_priv);
1612
1613 intel_suspend_encoders(dev_priv);
1614
Ville Syrjälä712bf362016-10-31 22:37:23 +02001615 intel_suspend_hw(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001616
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001617 i915_gem_suspend_gtt_mappings(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07001618
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00001619 i915_save_state(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001620
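/*
 * Report the device power state we are about to enter to the ACPI
 * OpRegion: D1 when suspending to idle, D3cold otherwise.
 */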
Imre Deakbc872292015-11-18 17:32:30 +02001621 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001622 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
Jesse Barnese5747e32014-06-12 08:35:47 -07001623
Chris Wilson03d92e42016-05-23 15:08:10 +01001624 intel_opregion_unregister(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001625
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001626 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
Dave Airlie3fa016a2012-03-28 10:48:49 +01001627
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001628 dev_priv->suspend_count++;
1629
Imre Deakf74ed082016-04-18 14:48:21 +03001630 intel_csr_ucode_suspend(dev_priv);
Imre Deakf514c2d2015-10-28 23:59:06 +02001631
Imre Deak1f814da2015-12-16 02:52:19 +02001632 enable_rpm_wakeref_asserts(dev_priv);
1633
Chris Wilson73b66f82018-05-25 10:26:29 +01001634 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001635}
1636
Imre Deak2cd9a682018-08-16 15:37:57 +03001637static enum i915_drm_suspend_mode
1638get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
1639{
1640 if (hibernate)
1641 return I915_DRM_SUSPEND_HIBERNATE;
1642
1643 if (suspend_to_idle(dev_priv))
1644 return I915_DRM_SUSPEND_IDLE;
1645
1646 return I915_DRM_SUSPEND_MEM;
1647}
1648
David Weinehallc49d13e2016-08-22 13:32:42 +03001649static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
Imre Deakc3c09c92014-10-23 19:23:15 +03001650{
David Weinehallc49d13e2016-08-22 13:32:42 +03001651 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001652 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deakc3c09c92014-10-23 19:23:15 +03001653 int ret;
1654
Imre Deak1f814da2015-12-16 02:52:19 +02001655 disable_rpm_wakeref_asserts(dev_priv);
1656
Chris Wilsonec92ad02018-05-31 09:22:46 +01001657 i915_gem_suspend_late(dev_priv);
1658
Chris Wilsonec92ad02018-05-31 09:22:46 +01001659 intel_uncore_suspend(dev_priv);
Imre Deak4c494a52016-10-13 14:34:06 +03001660
Imre Deak2cd9a682018-08-16 15:37:57 +03001661 intel_power_domains_suspend(dev_priv,
1662 get_suspend_mode(dev_priv, hibernation));
Imre Deak73dfc222015-11-17 17:33:53 +02001663
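/*
 * Enter the platform specific deep power state: DC9 on gen9 LP parts,
 * PC8 on HSW/BDW, the S0ix sequence on VLV/CHV.
 */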
Imre Deak507e1262016-04-20 20:27:54 +03001664 ret = 0;
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001665 if (IS_GEN9_LP(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001666 bxt_enable_dc9(dev_priv);
Imre Deakb8aea3d12016-04-20 20:27:55 +03001667 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03001668 hsw_enable_pc8(dev_priv);
1669 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1670 ret = vlv_suspend_complete(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03001671
1672 if (ret) {
1673 DRM_ERROR("Suspend complete failed: %d\n", ret);
Imre Deak2cd9a682018-08-16 15:37:57 +03001674 intel_power_domains_resume(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03001675
Imre Deak1f814da2015-12-16 02:52:19 +02001676 goto out;
Imre Deakc3c09c92014-10-23 19:23:15 +03001677 }
1678
David Weinehall52a05c32016-08-22 13:32:44 +03001679 pci_disable_device(pdev);
Imre Deakab3be732015-03-02 13:04:41 +02001680 /*
Imre Deak54875572015-06-30 17:06:47 +03001681 * During hibernation on some platforms the BIOS may try to access
Imre Deakab3be732015-03-02 13:04:41 +02001682 * the device even though it's already in D3 and hang the machine. So
1683 * leave the device in D0 on those platforms and hope the BIOS will
Imre Deak54875572015-06-30 17:06:47 +03001684 * power down the device properly. The issue was seen on multiple old
1685 * GENs with different BIOS vendors, so having an explicit blacklist
 1686 * is impractical; apply the workaround on everything pre GEN6. The
1687 * platforms where the issue was seen:
1688 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1689 * Fujitsu FSC S7110
1690 * Acer Aspire 1830T
Imre Deakab3be732015-03-02 13:04:41 +02001691 */
Tvrtko Ursulin514e1d62016-11-04 14:42:48 +00001692 if (!(hibernation && INTEL_GEN(dev_priv) < 6))
David Weinehall52a05c32016-08-22 13:32:44 +03001693 pci_set_power_state(pdev, PCI_D3hot);
Imre Deakc3c09c92014-10-23 19:23:15 +03001694
Imre Deak1f814da2015-12-16 02:52:19 +02001695out:
1696 enable_rpm_wakeref_asserts(dev_priv);
1697
1698 return ret;
Imre Deakc3c09c92014-10-23 19:23:15 +03001699}
1700
Matthew Aulda9a251c2016-12-02 10:24:11 +00001701static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001702{
1703 int error;
1704
Chris Wilsonded8b072016-07-05 10:40:22 +01001705 if (!dev) {
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001706 DRM_ERROR("dev: %p\n", dev);
Keith Packard1ae8c0a2009-06-28 15:42:17 -07001707 DRM_ERROR("DRM not initialized, aborting suspend.\n");
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001708 return -ENODEV;
1709 }
1710
Imre Deak0b14cbd2014-09-10 18:16:55 +03001711 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
1712 state.event != PM_EVENT_FREEZE))
1713 return -EINVAL;
Dave Airlie5bcf7192010-12-07 09:20:40 +10001714
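/* Nothing to do if vga_switcheroo has already powered the device off. */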
1715 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1716 return 0;
Chris Wilson6eecba32010-09-08 09:45:11 +01001717
Imre Deak5e365c32014-10-23 19:23:25 +03001718 error = i915_drm_suspend(dev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001719 if (error)
1720 return error;
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001721
Imre Deakab3be732015-03-02 13:04:41 +02001722 return i915_drm_suspend_late(dev, false);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001723}
1724
Imre Deak5e365c32014-10-23 19:23:25 +03001725static int i915_drm_resume(struct drm_device *dev)
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10001726{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001727 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001728 int ret;
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01001729
Imre Deak1f814da2015-12-16 02:52:19 +02001730 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonabc80ab2016-08-24 10:27:01 +01001731 intel_sanitize_gt_powersave(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02001732
Chris Wilson12887862018-06-14 10:40:59 +01001733 i915_gem_sanitize(dev_priv);
1734
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001735 ret = i915_ggtt_enable_hw(dev_priv);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03001736 if (ret)
1737 DRM_ERROR("failed to re-enable GGTT\n");
1738
Imre Deakf74ed082016-04-18 14:48:21 +03001739 intel_csr_ucode_resume(dev_priv);
1740
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00001741 i915_restore_state(dev_priv);
Imre Deak8090ba82016-08-10 14:07:33 +03001742 intel_pps_unlock_regs_wa(dev_priv);
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001743 intel_opregion_setup(dev_priv);
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001744
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02001745 intel_init_pch_refclk(dev_priv);
Chris Wilson1833b132012-05-09 11:56:28 +01001746
Peter Antoine364aece2015-05-11 08:50:45 +01001747 /*
 1748 * Interrupts have to be enabled before any batches are run. If not, the
1749 * GPU will hang. i915_gem_init_hw() will initiate batches to
1750 * update/restore the context.
1751 *
Imre Deak908764f2016-11-29 21:40:29 +02001752 * drm_mode_config_reset() needs AUX interrupts.
1753 *
Peter Antoine364aece2015-05-11 08:50:45 +01001754 * Modeset enabling in intel_modeset_init_hw() also needs working
1755 * interrupts.
1756 */
1757 intel_runtime_pm_enable_interrupts(dev_priv);
1758
Imre Deak908764f2016-11-29 21:40:29 +02001759 drm_mode_config_reset(dev);
1760
Chris Wilson37cd3302017-11-12 11:27:38 +00001761 i915_gem_resume(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001762
Daniel Vetterd5818932015-02-23 12:03:26 +01001763 intel_modeset_init_hw(dev);
Ville Syrjälä675f7ff2017-11-16 18:02:15 +02001764 intel_init_clock_gating(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001765
1766 spin_lock_irq(&dev_priv->irq_lock);
1767 if (dev_priv->display.hpd_irq_setup)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001768 dev_priv->display.hpd_irq_setup(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001769 spin_unlock_irq(&dev_priv->irq_lock);
1770
Ville Syrjälä1a4313d2018-07-05 19:43:52 +03001771 intel_dp_mst_resume(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001772
Lyudea16b7652016-03-11 10:57:01 -05001773 intel_display_resume(dev);
1774
Lyudee0b70062016-11-01 21:06:30 -04001775 drm_kms_helper_poll_enable(dev);
1776
Daniel Vetterd5818932015-02-23 12:03:26 +01001777 /*
1778 * ... but also need to make sure that hotplug processing
1779 * doesn't cause havoc. Like in the driver load code we don't
Gwan-gyeong Munc444ad72018-08-03 19:41:50 +03001780 * bother with the tiny race here where we might lose hotplug
Daniel Vetterd5818932015-02-23 12:03:26 +01001781 * notifications.
 1782 */
1783 intel_hpd_init(dev_priv);
Jesse Barnes1daed3f2011-01-05 12:01:25 -08001784
Chris Wilson03d92e42016-05-23 15:08:10 +01001785 intel_opregion_register(dev_priv);
Chris Wilson44834a62010-08-19 16:09:23 +01001786
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001787 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
Jesse Barnes073f34d2012-11-02 11:13:59 -07001788
Chris Wilson6f9f4b72016-05-23 15:08:09 +01001789 intel_opregion_notify_adapter(dev_priv, PCI_D0);
Jesse Barnese5747e32014-06-12 08:35:47 -07001790
Imre Deak2cd9a682018-08-16 15:37:57 +03001791 intel_power_domains_enable(dev_priv);
1792
Imre Deak1f814da2015-12-16 02:52:19 +02001793 enable_rpm_wakeref_asserts(dev_priv);
1794
Chris Wilson074c6ad2014-04-09 09:19:43 +01001795 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001796}
1797
Imre Deak5e365c32014-10-23 19:23:25 +03001798static int i915_drm_resume_early(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001799{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001800 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001801 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deak44410cd2016-04-18 14:45:54 +03001802 int ret;
Imre Deak36d61e62014-10-23 19:23:24 +03001803
Imre Deak76c4b252014-04-01 19:55:22 +03001804 /*
1805 * We have a resume ordering issue with the snd-hda driver also
 1806 * requiring our device to be powered up. Due to the lack of a
1807 * parent/child relationship we currently solve this with an early
1808 * resume hook.
1809 *
1810 * FIXME: This should be solved with a special hdmi sink device or
1811 * similar so that power domains can be employed.
1812 */
Imre Deak44410cd2016-04-18 14:45:54 +03001813
1814 /*
1815 * Note that we need to set the power state explicitly, since we
1816 * powered off the device during freeze and the PCI core won't power
1817 * it back up for us during thaw. Powering off the device during
1818 * freeze is not a hard requirement though, and during the
1819 * suspend/resume phases the PCI core makes sure we get here with the
1820 * device powered on. So in case we change our freeze logic and keep
1821 * the device powered we can also remove the following set power state
1822 * call.
1823 */
David Weinehall52a05c32016-08-22 13:32:44 +03001824 ret = pci_set_power_state(pdev, PCI_D0);
Imre Deak44410cd2016-04-18 14:45:54 +03001825 if (ret) {
1826 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
Imre Deak2cd9a682018-08-16 15:37:57 +03001827 return ret;
Imre Deak44410cd2016-04-18 14:45:54 +03001828 }
1829
1830 /*
1831 * Note that pci_enable_device() first enables any parent bridge
1832 * device and only then sets the power state for this device. The
1833 * bridge enabling is a nop though, since bridge devices are resumed
1834 * first. The order of enabling power and enabling the device is
1835 * imposed by the PCI core as described above, so here we preserve the
1836 * same order for the freeze/thaw phases.
1837 *
1838 * TODO: eventually we should remove pci_disable_device() /
 1839 * pci_enable_device() from suspend/resume. Due to how they
1840 * depend on the device enable refcount we can't anyway depend on them
1841 * disabling/enabling the device.
1842 */
Imre Deak2cd9a682018-08-16 15:37:57 +03001843 if (pci_enable_device(pdev))
1844 return -EIO;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001845
David Weinehall52a05c32016-08-22 13:32:44 +03001846 pci_set_master(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001847
Imre Deak1f814da2015-12-16 02:52:19 +02001848 disable_rpm_wakeref_asserts(dev_priv);
1849
Wayne Boyer666a4532015-12-09 12:29:35 -08001850 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001851 ret = vlv_resume_prepare(dev_priv, false);
Imre Deak36d61e62014-10-23 19:23:24 +03001852 if (ret)
Damien Lespiauff0b1872015-05-20 14:45:15 +01001853 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
1854 ret);
Imre Deak36d61e62014-10-23 19:23:24 +03001855
Hans de Goede68f60942017-02-10 11:28:01 +01001856 intel_uncore_resume_early(dev_priv);
Paulo Zanoniefee8332014-10-27 17:54:33 -02001857
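/*
 * Undo the platform specific deep power state entered in
 * i915_drm_suspend_late(): DC9 on gen9 LP parts, PC8 on HSW/BDW.
 */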
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02001858 if (IS_GEN9_LP(dev_priv)) {
Imre Deak0f906032018-03-22 16:36:42 +02001859 gen9_sanitize_dc_state(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03001860 bxt_disable_dc9(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001861 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Damien Lespiaua9a6b732015-05-20 14:45:14 +01001862 hsw_disable_pc8(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03001863 }
Paulo Zanoniefee8332014-10-27 17:54:33 -02001864
Chris Wilsondc979972016-05-10 14:10:04 +01001865 intel_uncore_sanitize(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02001866
Imre Deak2cd9a682018-08-16 15:37:57 +03001867 intel_power_domains_resume(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02001868
Chris Wilson4fdd5b42018-06-16 21:25:34 +01001869 intel_engines_sanitize(dev_priv);
1870
Imre Deak6e35e8a2016-04-18 10:04:19 +03001871 enable_rpm_wakeref_asserts(dev_priv);
1872
Imre Deak36d61e62014-10-23 19:23:24 +03001873 return ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001874}
1875
Tvrtko Ursulin7f26cb82016-12-01 14:16:41 +00001876static int i915_resume_switcheroo(struct drm_device *dev)
Imre Deak76c4b252014-04-01 19:55:22 +03001877{
Imre Deak50a00722014-10-23 19:23:17 +03001878 int ret;
Imre Deak76c4b252014-04-01 19:55:22 +03001879
Imre Deak097dd832014-10-23 19:23:19 +03001880 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1881 return 0;
1882
Imre Deak5e365c32014-10-23 19:23:25 +03001883 ret = i915_drm_resume_early(dev);
Imre Deak50a00722014-10-23 19:23:17 +03001884 if (ret)
1885 return ret;
1886
Imre Deak5a175142014-10-23 19:23:18 +03001887 return i915_drm_resume(dev);
1888}
1889
Ben Gamari11ed50e2009-09-14 17:48:45 -04001890/**
Eugeni Dodonovf3953dc2011-11-28 16:15:17 -02001891 * i915_reset - reset chip after a hang
Chris Wilson535275d2017-07-21 13:32:37 +01001892 * @i915: #drm_i915_private to reset
Chris Wilsond0667e92018-04-06 23:03:54 +01001893 * @stalled_mask: mask of the stalled engines with the guilty requests
1894 * @reason: user error message for why we are resetting
Ben Gamari11ed50e2009-09-14 17:48:45 -04001895 *
Chris Wilson780f2622016-09-09 14:11:52 +01001896 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1897 * on failure.
Ben Gamari11ed50e2009-09-14 17:48:45 -04001898 *
Chris Wilson221fe792016-09-09 14:11:51 +01001899 * Caller must hold the struct_mutex.
1900 *
Ben Gamari11ed50e2009-09-14 17:48:45 -04001901 * Procedure is fairly simple:
1902 * - reset the chip using the reset reg
1903 * - re-init context state
1904 * - re-init hardware status page
1905 * - re-init ring buffer
1906 * - re-init interrupt state
1907 * - re-init display
1908 */
Chris Wilsond0667e92018-04-06 23:03:54 +01001909void i915_reset(struct drm_i915_private *i915,
1910 unsigned int stalled_mask,
1911 const char *reason)
Ben Gamari11ed50e2009-09-14 17:48:45 -04001912{
Chris Wilson535275d2017-07-21 13:32:37 +01001913 struct i915_gpu_error *error = &i915->gpu_error;
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001914 int ret;
Chris Wilsonf7096d42017-12-01 12:20:11 +00001915 int i;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001916
Chris Wilson02866672018-03-30 14:18:01 +01001917 GEM_TRACE("flags=%lx\n", error->flags);
1918
Chris Wilsonf7096d42017-12-01 12:20:11 +00001919 might_sleep();
Chris Wilson535275d2017-07-21 13:32:37 +01001920 lockdep_assert_held(&i915->drm.struct_mutex);
Chris Wilson8c185ec2017-03-16 17:13:02 +00001921 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
Chris Wilson221fe792016-09-09 14:11:51 +01001922
Chris Wilson8c185ec2017-03-16 17:13:02 +00001923 if (!test_bit(I915_RESET_HANDOFF, &error->flags))
Chris Wilson780f2622016-09-09 14:11:52 +01001924 return;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001925
Chris Wilsond98c52c2016-04-13 17:35:05 +01001926 /* Clear any previous failed attempts at recovery. Time to try again. */
Chris Wilson535275d2017-07-21 13:32:37 +01001927 if (!i915_gem_unset_wedged(i915))
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001928 goto wakeup;
1929
Chris Wilsond0667e92018-04-06 23:03:54 +01001930 if (reason)
1931 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
Chris Wilson8af29b02016-09-09 14:11:47 +01001932 error->reset_count++;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001933
Chris Wilson535275d2017-07-21 13:32:37 +01001934 ret = i915_gem_reset_prepare(i915);
Chris Wilson0e178ae2017-01-17 17:59:06 +02001935 if (ret) {
Chris Wilson107783d2017-12-05 17:27:57 +00001936 dev_err(i915->drm.dev, "GPU recovery failed\n");
Chris Wilson107783d2017-12-05 17:27:57 +00001937 goto taint;
Chris Wilson0e178ae2017-01-17 17:59:06 +02001938 }
Chris Wilson9e60ab02016-10-04 21:11:28 +01001939
Chris Wilsonf7096d42017-12-01 12:20:11 +00001940 if (!intel_has_gpu_reset(i915)) {
Chris Wilson3ef98f52017-12-11 20:40:40 +00001941 if (i915_modparams.reset)
1942 dev_err(i915->drm.dev, "GPU reset not supported\n");
1943 else
1944 DRM_DEBUG_DRIVER("GPU reset disabled\n");
Chris Wilsonf7096d42017-12-01 12:20:11 +00001945 goto error;
1946 }
1947
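/* Attempt the full GPU reset up to three times, 100ms apart. */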
1948 for (i = 0; i < 3; i++) {
1949 ret = intel_gpu_reset(i915, ALL_ENGINES);
1950 if (ret == 0)
1951 break;
1952
1953 msleep(100);
1954 }
Kenneth Graunke0573ed42010-09-11 03:17:19 -07001955 if (ret) {
Chris Wilsonf7096d42017-12-01 12:20:11 +00001956 dev_err(i915->drm.dev, "Failed to reset chip\n");
Chris Wilson107783d2017-12-05 17:27:57 +00001957 goto taint;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001958 }
1959
1960 /* Ok, now get things going again... */
1961
1962 /*
1963 * Everything depends on having the GTT running, so we need to start
Chris Wilson0db8c962017-09-06 12:14:05 +01001964 * there.
1965 */
1966 ret = i915_ggtt_enable_hw(i915);
1967 if (ret) {
Chris Wilson8177e112018-02-07 11:15:45 +00001968 DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
1969 ret);
Chris Wilson0db8c962017-09-06 12:14:05 +01001970 goto error;
1971 }
1972
Chris Wilsond0667e92018-04-06 23:03:54 +01001973 i915_gem_reset(i915, stalled_mask);
Chris Wilsona31d73c2017-12-17 13:28:50 +00001974 intel_overlay_reset(i915);
1975
Chris Wilson0db8c962017-09-06 12:14:05 +01001976 /*
Ben Gamari11ed50e2009-09-14 17:48:45 -04001977 * Next we need to restore the context, but we don't use those
1978 * yet either...
1979 *
1980 * Ring buffer needs to be re-initialized in the KMS case, or if X
1981 * was running at the time of the reset (i.e. we weren't VT
1982 * switched away).
1983 */
Chris Wilson535275d2017-07-21 13:32:37 +01001984 ret = i915_gem_init_hw(i915);
Daniel Vetter33d30a92015-02-23 12:03:27 +01001985 if (ret) {
Chris Wilson8177e112018-02-07 11:15:45 +00001986 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1987 ret);
Chris Wilsond98c52c2016-04-13 17:35:05 +01001988 goto error;
Ben Gamari11ed50e2009-09-14 17:48:45 -04001989 }
1990
Chris Wilson535275d2017-07-21 13:32:37 +01001991 i915_queue_hangcheck(i915);
Chris Wilsonc2a126a2016-11-22 14:41:19 +00001992
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001993finish:
Chris Wilson535275d2017-07-21 13:32:37 +01001994 i915_gem_reset_finish(i915);
Chris Wilson2e8f9d32017-03-16 17:13:04 +00001995wakeup:
Chris Wilson8c185ec2017-03-16 17:13:02 +00001996 clear_bit(I915_RESET_HANDOFF, &error->flags);
1997 wake_up_bit(&error->flags, I915_RESET_HANDOFF);
Chris Wilson780f2622016-09-09 14:11:52 +01001998 return;
Chris Wilsond98c52c2016-04-13 17:35:05 +01001999
Chris Wilson107783d2017-12-05 17:27:57 +00002000taint:
2001 /*
2002 * History tells us that if we cannot reset the GPU now, we
2003 * never will. This then impacts everything that is run
2004 * subsequently. On failing the reset, we mark the driver
2005 * as wedged, preventing further execution on the GPU.
2006 * We also want to go one step further and add a taint to the
2007 * kernel so that any subsequent faults can be traced back to
2008 * this failure. This is important for CI, where if the
2009 * GPU/driver fails we would like to reboot and restart testing
2010 * rather than continue on into oblivion. For everyone else,
2011 * the system should still plod along, but they have been warned!
2012 */
2013 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
Chris Wilsond98c52c2016-04-13 17:35:05 +01002014error:
Chris Wilson535275d2017-07-21 13:32:37 +01002015 i915_gem_set_wedged(i915);
Chris Wilsone61e0f52018-02-21 09:56:36 +00002016 i915_retire_requests(i915);
Chris Wilson2e8f9d32017-03-16 17:13:04 +00002017 goto finish;
Ben Gamari11ed50e2009-09-14 17:48:45 -04002018}
2019
Michel Thierry6acbea82017-10-31 15:53:09 -07002020static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
2021 struct intel_engine_cs *engine)
2022{
2023 return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
2024}
2025
Michel Thierry142bc7d2017-06-20 10:57:46 +01002026/**
2027 * i915_reset_engine - reset GPU engine to recover from a hang
2028 * @engine: engine to reset
Chris Wilsonce800752018-03-20 10:04:49 +00002029 * @msg: reason for GPU reset; or NULL for no dev_notice()
Michel Thierry142bc7d2017-06-20 10:57:46 +01002030 *
2031 * Reset a specific GPU engine. Useful if a hang is detected.
2032 * Returns zero on successful reset or otherwise an error code.
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002033 *
2034 * Procedure is:
 2035 * - identify the request that caused the hang and drop it
2036 * - reset engine (which will force the engine to idle)
2037 * - re-init/configure engine
Michel Thierry142bc7d2017-06-20 10:57:46 +01002038 */
Chris Wilsonce800752018-03-20 10:04:49 +00002039int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
Michel Thierry142bc7d2017-06-20 10:57:46 +01002040{
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002041 struct i915_gpu_error *error = &engine->i915->gpu_error;
Chris Wilsone61e0f52018-02-21 09:56:36 +00002042 struct i915_request *active_request;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002043 int ret;
2044
Chris Wilson02866672018-03-30 14:18:01 +01002045 GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002046 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
2047
Chris Wilsonf6ba181a2017-12-16 00:22:06 +00002048 active_request = i915_gem_reset_prepare_engine(engine);
2049 if (IS_ERR_OR_NULL(active_request)) {
2050 /* Either the previous reset failed, or we pardon the reset. */
2051 ret = PTR_ERR(active_request);
2052 goto out;
2053 }
2054
Chris Wilsonce800752018-03-20 10:04:49 +00002055 if (msg)
Chris Wilson535275d2017-07-21 13:32:37 +01002056 dev_notice(engine->i915->drm.dev,
Chris Wilsonce800752018-03-20 10:04:49 +00002057 "Resetting %s for %s\n", engine->name, msg);
Chris Wilson73676122017-07-21 13:32:31 +01002058 error->reset_engine_count[engine->id]++;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002059
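/*
 * With GuC submission in use the engine reset has to be requested via
 * the GuC; otherwise reset the engine directly.
 */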
Michel Thierry6acbea82017-10-31 15:53:09 -07002060 if (!engine->i915->guc.execbuf_client)
2061 ret = intel_gt_reset_engine(engine->i915, engine);
2062 else
2063 ret = intel_guc_reset_engine(&engine->i915->guc, engine);
Chris Wilson0364cd12017-07-21 13:32:21 +01002064 if (ret) {
2065 /* If we fail here, we expect to fallback to a global reset */
Michel Thierry6acbea82017-10-31 15:53:09 -07002066 DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
2067 engine->i915->guc.execbuf_client ? "GuC " : "",
Chris Wilson0364cd12017-07-21 13:32:21 +01002068 engine->name, ret);
2069 goto out;
2070 }
Chris Wilsonb4f3e162017-07-21 13:32:20 +01002071
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002072 /*
 2073 * The request that caused the hang is stuck on elsp; we know the
 2074 * active request and can drop it, adjusting the head to skip the
 2075 * offending request and resume the remaining requests in the queue.
2076 */
Chris Wilsonbba08692018-04-06 23:03:53 +01002077 i915_gem_reset_engine(engine, active_request, true);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002078
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002079 /*
2080 * The engine and its registers (and workarounds in case of render)
2081 * have been reset to their default values. Follow the init_ring
2082 * process to program RING_MODE, HWSP and re-enable submission.
2083 */
2084 ret = engine->init_hw(engine);
Michel Thierry702c8f82017-06-20 10:57:48 +01002085 if (ret)
2086 goto out;
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002087
2088out:
Chris Wilsona99b32a2018-08-14 18:18:57 +01002089 intel_engine_cancel_stop_cs(engine);
Chris Wilson0364cd12017-07-21 13:32:21 +01002090 i915_gem_reset_finish_engine(engine);
Michel Thierrya1ef70e2017-06-20 10:57:47 +01002091 return ret;
Michel Thierry142bc7d2017-06-20 10:57:46 +01002092}
2093
Chris Wilson73b66f82018-05-25 10:26:29 +01002094static int i915_pm_prepare(struct device *kdev)
2095{
2096 struct pci_dev *pdev = to_pci_dev(kdev);
2097 struct drm_device *dev = pci_get_drvdata(pdev);
2098
2099 if (!dev) {
2100 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2101 return -ENODEV;
2102 }
2103
2104 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2105 return 0;
2106
2107 return i915_drm_prepare(dev);
2108}
2109
David Weinehallc49d13e2016-08-22 13:32:42 +03002110static int i915_pm_suspend(struct device *kdev)
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002111{
David Weinehallc49d13e2016-08-22 13:32:42 +03002112 struct pci_dev *pdev = to_pci_dev(kdev);
2113 struct drm_device *dev = pci_get_drvdata(pdev);
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002114
David Weinehallc49d13e2016-08-22 13:32:42 +03002115 if (!dev) {
2116 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002117 return -ENODEV;
2118 }
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002119
David Weinehallc49d13e2016-08-22 13:32:42 +03002120 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Dave Airlie5bcf7192010-12-07 09:20:40 +10002121 return 0;
2122
David Weinehallc49d13e2016-08-22 13:32:42 +03002123 return i915_drm_suspend(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002124}
2125
David Weinehallc49d13e2016-08-22 13:32:42 +03002126static int i915_pm_suspend_late(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002127{
David Weinehallc49d13e2016-08-22 13:32:42 +03002128 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002129
2130 /*
Damien Lespiauc965d9952015-05-18 19:53:48 +01002131 * We have a suspend ordering issue with the snd-hda driver also
Imre Deak76c4b252014-04-01 19:55:22 +03002132 * requiring our device to be powered up. Due to the lack of a
 2133 * parent/child relationship we currently solve this with a late
2134 * suspend hook.
2135 *
2136 * FIXME: This should be solved with a special hdmi sink device or
2137 * similar so that power domains can be employed.
2138 */
David Weinehallc49d13e2016-08-22 13:32:42 +03002139 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak76c4b252014-04-01 19:55:22 +03002140 return 0;
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002141
David Weinehallc49d13e2016-08-22 13:32:42 +03002142 return i915_drm_suspend_late(dev, false);
Imre Deakab3be732015-03-02 13:04:41 +02002143}
2144
David Weinehallc49d13e2016-08-22 13:32:42 +03002145static int i915_pm_poweroff_late(struct device *kdev)
Imre Deakab3be732015-03-02 13:04:41 +02002146{
David Weinehallc49d13e2016-08-22 13:32:42 +03002147 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deakab3be732015-03-02 13:04:41 +02002148
David Weinehallc49d13e2016-08-22 13:32:42 +03002149 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deakab3be732015-03-02 13:04:41 +02002150 return 0;
2151
David Weinehallc49d13e2016-08-22 13:32:42 +03002152 return i915_drm_suspend_late(dev, true);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002153}
2154
David Weinehallc49d13e2016-08-22 13:32:42 +03002155static int i915_pm_resume_early(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002156{
David Weinehallc49d13e2016-08-22 13:32:42 +03002157 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002158
David Weinehallc49d13e2016-08-22 13:32:42 +03002159 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002160 return 0;
2161
David Weinehallc49d13e2016-08-22 13:32:42 +03002162 return i915_drm_resume_early(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002163}
2164
David Weinehallc49d13e2016-08-22 13:32:42 +03002165static int i915_pm_resume(struct device *kdev)
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002166{
David Weinehallc49d13e2016-08-22 13:32:42 +03002167 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002168
David Weinehallc49d13e2016-08-22 13:32:42 +03002169 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002170 return 0;
2171
David Weinehallc49d13e2016-08-22 13:32:42 +03002172 return i915_drm_resume(dev);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002173}
2174
Chris Wilson1f19ac22016-05-14 07:26:32 +01002175/* freeze: before creating the hibernation image */
David Weinehallc49d13e2016-08-22 13:32:42 +03002176static int i915_pm_freeze(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002177{
Imre Deakdd9f31c2017-08-16 17:46:07 +03002178 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Chris Wilson6a800ea2016-09-21 14:51:07 +01002179 int ret;
2180
Imre Deakdd9f31c2017-08-16 17:46:07 +03002181 if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2182 ret = i915_drm_suspend(dev);
2183 if (ret)
2184 return ret;
2185 }
Chris Wilson6a800ea2016-09-21 14:51:07 +01002186
2187 ret = i915_gem_freeze(kdev_to_i915(kdev));
2188 if (ret)
2189 return ret;
2190
2191 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002192}
2193
David Weinehallc49d13e2016-08-22 13:32:42 +03002194static int i915_pm_freeze_late(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002195{
Imre Deakdd9f31c2017-08-16 17:46:07 +03002196 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Chris Wilson461fb992016-05-14 07:26:33 +01002197 int ret;
2198
Imre Deakdd9f31c2017-08-16 17:46:07 +03002199 if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2200 ret = i915_drm_suspend_late(dev, true);
2201 if (ret)
2202 return ret;
2203 }
Chris Wilson461fb992016-05-14 07:26:33 +01002204
David Weinehallc49d13e2016-08-22 13:32:42 +03002205 ret = i915_gem_freeze_late(kdev_to_i915(kdev));
Chris Wilson461fb992016-05-14 07:26:33 +01002206 if (ret)
2207 return ret;
2208
2209 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002210}
2211
2212/* thaw: called after creating the hibernation image, but before turning off. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002213static int i915_pm_thaw_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002214{
David Weinehallc49d13e2016-08-22 13:32:42 +03002215 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002216}
2217
David Weinehallc49d13e2016-08-22 13:32:42 +03002218static int i915_pm_thaw(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002219{
David Weinehallc49d13e2016-08-22 13:32:42 +03002220 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002221}
2222
2223/* restore: called after loading the hibernation image. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002224static int i915_pm_restore_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002225{
David Weinehallc49d13e2016-08-22 13:32:42 +03002226 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002227}
2228
David Weinehallc49d13e2016-08-22 13:32:42 +03002229static int i915_pm_restore(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002230{
David Weinehallc49d13e2016-08-22 13:32:42 +03002231 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002232}
2233
Imre Deakddeea5b2014-05-05 15:19:56 +03002234/*
2235 * Save all Gunit registers that may be lost after a D3 and a subsequent
2236 * S0i[R123] transition. The list of registers needing a save/restore is
 2237 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2238 * registers in the following way:
2239 * - Driver: saved/restored by the driver
2240 * - Punit : saved/restored by the Punit firmware
2241 * - No, w/o marking: no need to save/restore, since the register is R/O or
 2242 * used internally by the HW in a way that doesn't depend on
2243 * keeping the content across a suspend/resume.
2244 * - Debug : used for debugging
2245 *
2246 * We save/restore all registers marked with 'Driver', with the following
2247 * exceptions:
2248 * - Registers out of use, including also registers marked with 'Debug'.
2249 * These have no effect on the driver's operation, so we don't save/restore
2250 * them to reduce the overhead.
2251 * - Registers that are fully setup by an initialization function called from
2252 * the resume path. For example many clock gating and RPS/RC6 registers.
2253 * - Registers that provide the right functionality with their reset defaults.
2254 *
2255 * TODO: Except for registers that based on the above 3 criteria can be safely
2256 * ignored, we save/restore all others, practically treating the HW context as
2257 * a black-box for the driver. Further investigation is needed to reduce the
2258 * saved/restored registers even further, by following the same 3 criteria.
2259 */
2260static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2261{
2262 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2263 int i;
2264
2265 /* GAM 0x4000-0x4770 */
2266 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
2267 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
2268 s->arb_mode = I915_READ(ARB_MODE);
2269 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
2270 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
2271
2272 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002273 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03002274
2275 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
Imre Deakb5f1c972015-04-15 16:52:30 -07002276 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
Imre Deakddeea5b2014-05-05 15:19:56 +03002277
2278 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
2279 s->ecochk = I915_READ(GAM_ECOCHK);
2280 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
2281 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
2282
2283 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
2284
2285 /* MBC 0x9024-0x91D0, 0x8500 */
2286 s->g3dctl = I915_READ(VLV_G3DCTL);
2287 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
2288 s->mbctl = I915_READ(GEN6_MBCTL);
2289
2290 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2291 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
2292 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
2293 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
2294 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
2295 s->rstctl = I915_READ(GEN6_RSTCTL);
2296 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
2297
2298 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2299 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
2300 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
2301 s->rpdeuc = I915_READ(GEN6_RPDEUC);
2302 s->ecobus = I915_READ(ECOBUS);
2303 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
2304 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
2305 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
2306 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
2307 s->rcedata = I915_READ(VLV_RCEDATA);
2308 s->spare2gh = I915_READ(VLV_SPAREG2H);
2309
2310 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2311 s->gt_imr = I915_READ(GTIMR);
2312 s->gt_ier = I915_READ(GTIER);
2313 s->pm_imr = I915_READ(GEN6_PMIMR);
2314 s->pm_ier = I915_READ(GEN6_PMIER);
2315
2316 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002317 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
Imre Deakddeea5b2014-05-05 15:19:56 +03002318
2319 /* GT SA CZ domain, 0x100000-0x138124 */
2320 s->tilectl = I915_READ(TILECTL);
2321 s->gt_fifoctl = I915_READ(GTFIFOCTL);
2322 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
2323 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2324 s->pmwgicz = I915_READ(VLV_PMWGICZ);
2325
2326 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2327 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
2328 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002329 s->pcbr = I915_READ(VLV_PCBR);
Imre Deakddeea5b2014-05-05 15:19:56 +03002330 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
2331
2332 /*
2333 * Not saving any of:
2334 * DFT, 0x9800-0x9EC0
2335 * SARB, 0xB000-0xB1FC
2336 * GAC, 0x5208-0x524C, 0x14000-0x14C000
2337 * PCI CFG
2338 */
2339}
2340
2341static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
2342{
2343 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
2344 u32 val;
2345 int i;
2346
2347 /* GAM 0x4000-0x4770 */
2348 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
2349 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
2350 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
2351 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
2352 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
2353
2354 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002355 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002356
2357 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
Imre Deakb5f1c972015-04-15 16:52:30 -07002358 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
Imre Deakddeea5b2014-05-05 15:19:56 +03002359
2360 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
2361 I915_WRITE(GAM_ECOCHK, s->ecochk);
2362 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
2363 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
2364
2365 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
2366
2367 /* MBC 0x9024-0x91D0, 0x8500 */
2368 I915_WRITE(VLV_G3DCTL, s->g3dctl);
2369 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
2370 I915_WRITE(GEN6_MBCTL, s->mbctl);
2371
2372 /* GCP 0x9400-0x9424, 0x8100-0x810C */
2373 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
2374 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
2375 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
2376 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
2377 I915_WRITE(GEN6_RSTCTL, s->rstctl);
2378 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
2379
2380 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
2381 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
2382 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
2383 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
2384 I915_WRITE(ECOBUS, s->ecobus);
2385 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
 2386 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
2387 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
2388 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
2389 I915_WRITE(VLV_RCEDATA, s->rcedata);
2390 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
2391
2392 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
2393 I915_WRITE(GTIMR, s->gt_imr);
2394 I915_WRITE(GTIER, s->gt_ier);
2395 I915_WRITE(GEN6_PMIMR, s->pm_imr);
2396 I915_WRITE(GEN6_PMIER, s->pm_ier);
2397
2398 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
Ville Syrjälä22dfe792015-09-18 20:03:16 +03002399 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
Imre Deakddeea5b2014-05-05 15:19:56 +03002400
2401 /* GT SA CZ domain, 0x100000-0x138124 */
2402 I915_WRITE(TILECTL, s->tilectl);
2403 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
2404 /*
2405 	 * Preserve the GT allow wake and GFX force clock bits; they are not
2406 	 * restored here, as they are used to control the s0ix suspend/resume
2407 	 * sequence by the caller.
2408 */
2409 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2410 val &= VLV_GTLC_ALLOWWAKEREQ;
2411 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
2412 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2413
2414 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2415 val &= VLV_GFX_CLK_FORCE_ON_BIT;
2416 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
2417 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2418
2419 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
2420
2421 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
2422 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
2423 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
Jesse Barnes9c252102015-04-01 14:22:57 -07002424 I915_WRITE(VLV_PCBR, s->pcbr);
Imre Deakddeea5b2014-05-05 15:19:56 +03002425 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
2426}
2427
Chris Wilson3dd14c02017-04-21 14:58:15 +01002428static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
2429 u32 mask, u32 val)
2430{
2431 /* The HW does not like us polling for PW_STATUS frequently, so
2432 * use the sleeping loop rather than risk the busy spin within
2433 * intel_wait_for_register().
2434 *
2435 * Transitioning between RC6 states should be at most 2ms (see
2436 * valleyview_enable_rps) so use a 3ms timeout.
2437 */
2438 return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
2439 3);
2440}
2441
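/*
 * Force the GFX clock on or off via the survivability register. When forcing
 * it on, wait up to 20ms for the GFX clock status bit to report the clock as
 * running.
 */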
Imre Deak650ad972014-04-18 16:35:02 +03002442int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
2443{
2444 u32 val;
2445 int err;
2446
Imre Deak650ad972014-04-18 16:35:02 +03002447 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
2448 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
2449 if (force_on)
2450 val |= VLV_GFX_CLK_FORCE_ON_BIT;
2451 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
2452
2453 if (!force_on)
2454 return 0;
2455
Chris Wilsonc6ddc5f2016-06-30 15:32:46 +01002456 err = intel_wait_for_register(dev_priv,
2457 VLV_GTLC_SURVIVABILITY_REG,
2458 VLV_GFX_CLK_STATUS_BIT,
2459 VLV_GFX_CLK_STATUS_BIT,
2460 20);
Imre Deak650ad972014-04-18 16:35:02 +03002461 if (err)
2462 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
2463 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
2464
2465 return err;
Imre Deak650ad972014-04-18 16:35:02 +03002466}
2467
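/*
 * Allow or disallow GT wake requests and wait (up to 3ms) for the allow-wake
 * acknowledgment to reflect the requested state.
 */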
Imre Deakddeea5b2014-05-05 15:19:56 +03002468static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
2469{
Chris Wilson3dd14c02017-04-21 14:58:15 +01002470 u32 mask;
Imre Deakddeea5b2014-05-05 15:19:56 +03002471 u32 val;
Chris Wilson3dd14c02017-04-21 14:58:15 +01002472 int err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002473
2474 val = I915_READ(VLV_GTLC_WAKE_CTRL);
2475 val &= ~VLV_GTLC_ALLOWWAKEREQ;
2476 if (allow)
2477 val |= VLV_GTLC_ALLOWWAKEREQ;
2478 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
2479 POSTING_READ(VLV_GTLC_WAKE_CTRL);
2480
Chris Wilson3dd14c02017-04-21 14:58:15 +01002481 mask = VLV_GTLC_ALLOWWAKEACK;
2482 val = allow ? mask : 0;
2483
2484 err = vlv_wait_for_pw_status(dev_priv, mask, val);
Imre Deakddeea5b2014-05-05 15:19:56 +03002485 if (err)
2486 		DRM_ERROR("timeout %s GT wake\n", allow ? "allowing" : "disallowing");
Chris Wilsonb2736692016-06-30 15:32:47 +01002487
Imre Deakddeea5b2014-05-05 15:19:56 +03002488 return err;
Imre Deakddeea5b2014-05-05 15:19:56 +03002489}
2490
Chris Wilson3dd14c02017-04-21 14:58:15 +01002491static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
2492 bool wait_for_on)
Imre Deakddeea5b2014-05-05 15:19:56 +03002493{
2494 u32 mask;
2495 u32 val;
Imre Deakddeea5b2014-05-05 15:19:56 +03002496
2497 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
2498 val = wait_for_on ? mask : 0;
Imre Deakddeea5b2014-05-05 15:19:56 +03002499
2500 /*
2501 	 * RC6 transitioning can be delayed by up to 2 msec (see
2502 	 * valleyview_enable_rps), so use 3 msec for safety.
Chris Wilsone01569a2018-04-09 10:49:05 +01002503 *
2504 	 * This can fail to turn off RC6 if the GPU is stuck after a failed
2505 	 * reset and we are trying to force the machine to sleep.
Imre Deakddeea5b2014-05-05 15:19:56 +03002506 */
Chris Wilson3dd14c02017-04-21 14:58:15 +01002507 if (vlv_wait_for_pw_status(dev_priv, mask, val))
Chris Wilsone01569a2018-04-09 10:49:05 +01002508 DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
2509 onoff(wait_for_on));
Imre Deakddeea5b2014-05-05 15:19:56 +03002510}
2511
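/*
 * Report any GT register access that happened while GT waking was
 * disallowed, then clear the sticky ALLOWWAKEERR flag.
 */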
2512static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
2513{
2514 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
2515 return;
2516
Daniel Vetter6fa283b2016-01-19 21:00:56 +01002517 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
Imre Deakddeea5b2014-05-05 15:19:56 +03002518 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
2519}
2520
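/*
 * Gate the GT from waking before S0ix entry: wait for the GT power wells to
 * turn off, disallow GT wake requests and, with the GFX clock forced on,
 * save the Gunit state (VLV only). On failure GT waking is re-allowed and
 * the GFX clock force is dropped.
 */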
Sagar Kambleebc32822014-08-13 23:07:05 +05302521static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
Imre Deakddeea5b2014-05-05 15:19:56 +03002522{
2523 u32 mask;
2524 int err;
2525
2526 /*
2527 	 * Bspec defines the following GT power well status flags as debug only, so
2528 * don't treat them as hard failures.
2529 */
Chris Wilson3dd14c02017-04-21 14:58:15 +01002530 vlv_wait_for_gt_wells(dev_priv, false);
Imre Deakddeea5b2014-05-05 15:19:56 +03002531
2532 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
2533 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
2534
2535 vlv_check_no_gt_access(dev_priv);
2536
2537 err = vlv_force_gfx_clock(dev_priv, true);
2538 if (err)
2539 goto err1;
2540
2541 err = vlv_allow_gt_wake(dev_priv, false);
2542 if (err)
2543 goto err2;
Deepak S98711162014-12-12 14:18:16 +05302544
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002545 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302546 vlv_save_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002547
2548 err = vlv_force_gfx_clock(dev_priv, false);
2549 if (err)
2550 goto err2;
2551
2552 return 0;
2553
2554err2:
2555 /* For safety always re-enable waking and disable gfx clock forcing */
2556 vlv_allow_gt_wake(dev_priv, true);
2557err1:
2558 vlv_force_gfx_clock(dev_priv, false);
2559
2560 return err;
2561}
2562
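/*
 * Undo vlv_suspend_complete() on S0ix exit: with the GFX clock forced on,
 * restore the Gunit state (VLV only) and re-allow GT wake requests. Clock
 * gating is reinitialized only when resuming via runtime PM.
 */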
Sagar Kamble016970b2014-08-13 23:07:06 +05302563static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
2564 bool rpm_resume)
Imre Deakddeea5b2014-05-05 15:19:56 +03002565{
Imre Deakddeea5b2014-05-05 15:19:56 +03002566 int err;
2567 int ret;
2568
2569 /*
2570 	 * If any of the steps fails, just try to continue; that's the best we
2571 	 * can do at this point. Return the first error code (which will also
2572 * leave RPM permanently disabled).
2573 */
2574 ret = vlv_force_gfx_clock(dev_priv, true);
2575
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002576 if (!IS_CHERRYVIEW(dev_priv))
Deepak S98711162014-12-12 14:18:16 +05302577 vlv_restore_gunit_s0ix_state(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002578
2579 err = vlv_allow_gt_wake(dev_priv, true);
2580 if (!ret)
2581 ret = err;
2582
2583 err = vlv_force_gfx_clock(dev_priv, false);
2584 if (!ret)
2585 ret = err;
2586
2587 vlv_check_no_gt_access(dev_priv);
2588
Chris Wilson7c108fd2016-10-24 13:42:18 +01002589 if (rpm_resume)
Ville Syrjälä46f16e62016-10-31 22:37:22 +02002590 intel_init_clock_gating(dev_priv);
Imre Deakddeea5b2014-05-05 15:19:56 +03002591
2592 return ret;
2593}
2594
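/*
 * Runtime (S0ix) suspend: quiesce GEM, GuC, interrupts and uncore access,
 * then run the platform specific power sequence (DC9 on GEN9 LP, PC8 on
 * HSW/BDW, Gunit save and GT wake gating on VLV/CHV). On failure the steps
 * are unwound and the error is returned, leaving runtime PM disabled.
 */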
David Weinehallc49d13e2016-08-22 13:32:42 +03002595static int intel_runtime_suspend(struct device *kdev)
Paulo Zanoni8a187452013-12-06 20:32:13 -02002596{
David Weinehallc49d13e2016-08-22 13:32:42 +03002597 struct pci_dev *pdev = to_pci_dev(kdev);
Paulo Zanoni8a187452013-12-06 20:32:13 -02002598 struct drm_device *dev = pci_get_drvdata(pdev);
Chris Wilsonfac5e232016-07-04 11:34:36 +01002599 struct drm_i915_private *dev_priv = to_i915(dev);
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002600 int ret;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002601
Chris Wilsonfb6db0f2017-12-01 11:30:30 +00002602 if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
Imre Deakc6df39b2014-04-14 20:24:29 +03002603 return -ENODEV;
2604
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01002605 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
Imre Deak604effb2014-08-26 13:26:56 +03002606 return -ENODEV;
2607
Paulo Zanoni8a187452013-12-06 20:32:13 -02002608 DRM_DEBUG_KMS("Suspending device\n");
2609
Imre Deak1f814da2015-12-16 02:52:19 +02002610 disable_rpm_wakeref_asserts(dev_priv);
2611
Imre Deakd6102972014-05-07 19:57:49 +03002612 /*
2613 * We are safe here against re-faults, since the fault handler takes
2614 * an RPM reference.
2615 */
Chris Wilson7c108fd2016-10-24 13:42:18 +01002616 i915_gem_runtime_suspend(dev_priv);
Imre Deakd6102972014-05-07 19:57:49 +03002617
Michal Wajdeczko7cfca4a2018-03-02 11:15:49 +00002618 intel_uc_suspend(dev_priv);
Alex Daia1c41992015-09-30 09:46:37 -07002619
Imre Deak2eb52522014-11-19 15:30:05 +02002620 intel_runtime_pm_disable_interrupts(dev_priv);
Imre Deakb5478bc2014-04-14 20:24:37 +03002621
Hans de Goede01c799c2017-11-14 14:55:18 +01002622 intel_uncore_suspend(dev_priv);
2623
Imre Deak507e1262016-04-20 20:27:54 +03002624 ret = 0;
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02002625 if (IS_GEN9_LP(dev_priv)) {
Imre Deak507e1262016-04-20 20:27:54 +03002626 bxt_display_core_uninit(dev_priv);
2627 bxt_enable_dc9(dev_priv);
2628 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2629 hsw_enable_pc8(dev_priv);
2630 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2631 ret = vlv_suspend_complete(dev_priv);
2632 }
2633
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002634 if (ret) {
2635 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
Hans de Goede01c799c2017-11-14 14:55:18 +01002636 intel_uncore_runtime_resume(dev_priv);
2637
Daniel Vetterb9632912014-09-30 10:56:44 +02002638 intel_runtime_pm_enable_interrupts(dev_priv);
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002639
Michal Wajdeczko7cfca4a2018-03-02 11:15:49 +00002640 intel_uc_resume(dev_priv);
Sagar Arun Kamble1ed21cb2018-01-24 21:16:57 +05302641
2642 i915_gem_init_swizzling(dev_priv);
2643 i915_gem_restore_fences(dev_priv);
2644
Imre Deak1f814da2015-12-16 02:52:19 +02002645 enable_rpm_wakeref_asserts(dev_priv);
2646
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002647 return ret;
2648 }
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03002649
Imre Deak1f814da2015-12-16 02:52:19 +02002650 enable_rpm_wakeref_asserts(dev_priv);
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01002651 WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002652
Mika Kuoppalabc3b9342016-01-08 15:51:20 +02002653 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002654 DRM_ERROR("Unclaimed access detected prior to suspending\n");
2655
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01002656 dev_priv->runtime_pm.suspended = true;
Kristen Carlson Accardi1fb23622014-01-14 15:36:15 -08002657
2658 /*
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002659 * FIXME: We really should find a document that references the arguments
2660 * used below!
Kristen Carlson Accardi1fb23622014-01-14 15:36:15 -08002661 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002662 if (IS_BROADWELL(dev_priv)) {
Paulo Zanonid37ae192015-07-30 18:20:29 -03002663 /*
2664 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
2665 * being detected, and the call we do at intel_runtime_resume()
2666 * won't be able to restore them. Since PCI_D3hot matches the
2667 * actual specification and appears to be working, use it.
2668 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002669 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
Paulo Zanonid37ae192015-07-30 18:20:29 -03002670 } else {
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002671 /*
2672 		 * Current versions of firmware which depend on this opregion
2673 * notification have repurposed the D1 definition to mean
2674 * "runtime suspended" vs. what you would normally expect (D3)
2675 * to distinguish it from notifications that might be sent via
2676 * the suspend path.
2677 */
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002678 intel_opregion_notify_adapter(dev_priv, PCI_D1);
Paulo Zanonic8a0bd42014-08-21 17:09:38 -03002679 }
Paulo Zanoni8a187452013-12-06 20:32:13 -02002680
Mika Kuoppala59bad942015-01-16 11:34:40 +02002681 assert_forcewakes_inactive(dev_priv);
Chris Wilsondc9fb092015-01-16 11:34:34 +02002682
Ander Conselvan de Oliveira21d6e0b2017-01-20 16:28:43 +02002683 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Lyude19625e82016-06-21 17:03:44 -04002684 intel_hpd_poll_init(dev_priv);
2685
Paulo Zanonia8a8bd52014-03-07 20:08:05 -03002686 DRM_DEBUG_KMS("Device suspended\n");
Paulo Zanoni8a187452013-12-06 20:32:13 -02002687 return 0;
2688}
2689
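/*
 * Runtime (S0ix) resume: undo the platform specific power sequence, then
 * bring uncore, interrupts, GuC, swizzling, fences, HPD and IPC back up.
 * Errors are reported but the resume sequence is not rolled back.
 */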
David Weinehallc49d13e2016-08-22 13:32:42 +03002690static int intel_runtime_resume(struct device *kdev)
Paulo Zanoni8a187452013-12-06 20:32:13 -02002691{
David Weinehallc49d13e2016-08-22 13:32:42 +03002692 struct pci_dev *pdev = to_pci_dev(kdev);
Paulo Zanoni8a187452013-12-06 20:32:13 -02002693 struct drm_device *dev = pci_get_drvdata(pdev);
Chris Wilsonfac5e232016-07-04 11:34:36 +01002694 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002695 int ret = 0;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002696
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01002697 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
Imre Deak604effb2014-08-26 13:26:56 +03002698 return -ENODEV;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002699
2700 DRM_DEBUG_KMS("Resuming device\n");
2701
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01002702 WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
Imre Deak1f814da2015-12-16 02:52:19 +02002703 disable_rpm_wakeref_asserts(dev_priv);
2704
Chris Wilson6f9f4b72016-05-23 15:08:09 +01002705 intel_opregion_notify_adapter(dev_priv, PCI_D0);
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01002706 dev_priv->runtime_pm.suspended = false;
Mika Kuoppala55ec45c2015-12-15 16:25:08 +02002707 if (intel_uncore_unclaimed_mmio(dev_priv))
2708 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
Paulo Zanoni8a187452013-12-06 20:32:13 -02002709
Rodrigo Vivib9fd7992016-12-16 17:42:25 +02002710 if (IS_GEN9_LP(dev_priv)) {
Imre Deak507e1262016-04-20 20:27:54 +03002711 bxt_disable_dc9(dev_priv);
2712 bxt_display_core_init(dev_priv, true);
Imre Deakf62c79b2016-04-20 20:27:57 +03002713 if (dev_priv->csr.dmc_payload &&
2714 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2715 gen9_enable_dc5(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03002716 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002717 hsw_disable_pc8(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03002718 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002719 ret = vlv_resume_prepare(dev_priv, true);
Imre Deak507e1262016-04-20 20:27:54 +03002720 }
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002721
Hans de Goedebedf4d72017-11-14 14:55:17 +01002722 intel_uncore_runtime_resume(dev_priv);
2723
Sagar Arun Kamble1ed21cb2018-01-24 21:16:57 +05302724 intel_runtime_pm_enable_interrupts(dev_priv);
2725
Michal Wajdeczko7cfca4a2018-03-02 11:15:49 +00002726 intel_uc_resume(dev_priv);
Sagar Arun Kamble1ed21cb2018-01-24 21:16:57 +05302727
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002728 /*
2729 	 * No point in rolling back things in case of an error, as the best
2730 * we can do is to hope that things will still work (and disable RPM).
2731 */
Tvrtko Ursulinc6be6072016-11-16 08:55:31 +00002732 i915_gem_init_swizzling(dev_priv);
Chris Wilson83bf6d52017-02-03 12:57:17 +00002733 i915_gem_restore_fences(dev_priv);
Imre Deak92b806d2014-04-14 20:24:39 +03002734
Ville Syrjälä08d8a232015-08-27 23:56:08 +03002735 /*
2736 	 * On VLV/CHV, display interrupts are part of the display
2737 	 * power well, so HPD is reinitialized from there. For
2738 	 * everyone else, do it here.
2739 */
Wayne Boyer666a4532015-12-09 12:29:35 -08002740 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Ville Syrjälä08d8a232015-08-27 23:56:08 +03002741 intel_hpd_init(dev_priv);
2742
Kumar, Mahesh2503a0f2017-08-17 19:15:28 +05302743 intel_enable_ipc(dev_priv);
2744
Imre Deak1f814da2015-12-16 02:52:19 +02002745 enable_rpm_wakeref_asserts(dev_priv);
2746
Imre Deak0ab9cfe2014-04-15 16:39:45 +03002747 if (ret)
2748 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
2749 else
2750 DRM_DEBUG_KMS("Device resumed\n");
2751
2752 return ret;
Paulo Zanoni8a187452013-12-06 20:32:13 -02002753}
2754
Chris Wilson42f55512016-06-24 14:00:26 +01002755const struct dev_pm_ops i915_pm_ops = {
Imre Deak5545dbb2014-10-23 19:23:28 +03002756 /*
2757 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
2758 * PMSG_RESUME]
2759 */
Chris Wilson73b66f82018-05-25 10:26:29 +01002760 .prepare = i915_pm_prepare,
Akshay Joshi0206e352011-08-16 15:34:10 -04002761 .suspend = i915_pm_suspend,
Imre Deak76c4b252014-04-01 19:55:22 +03002762 .suspend_late = i915_pm_suspend_late,
2763 .resume_early = i915_pm_resume_early,
Akshay Joshi0206e352011-08-16 15:34:10 -04002764 .resume = i915_pm_resume,
Imre Deak5545dbb2014-10-23 19:23:28 +03002765
2766 /*
2767 * S4 event handlers
2768 * @freeze, @freeze_late : called (1) before creating the
2769 * hibernation image [PMSG_FREEZE] and
2770 * (2) after rebooting, before restoring
2771 * the image [PMSG_QUIESCE]
2772 * @thaw, @thaw_early : called (1) after creating the hibernation
2773 * image, before writing it [PMSG_THAW]
2774 * and (2) after failing to create or
2775 * restore the image [PMSG_RECOVER]
2776 * @poweroff, @poweroff_late: called after writing the hibernation
2777 * image, before rebooting [PMSG_HIBERNATE]
2778 * @restore, @restore_early : called after rebooting and restoring the
2779 * hibernation image [PMSG_RESTORE]
2780 */
Chris Wilson1f19ac22016-05-14 07:26:32 +01002781 .freeze = i915_pm_freeze,
2782 .freeze_late = i915_pm_freeze_late,
2783 .thaw_early = i915_pm_thaw_early,
2784 .thaw = i915_pm_thaw,
Imre Deak36d61e62014-10-23 19:23:24 +03002785 .poweroff = i915_pm_suspend,
Imre Deakab3be732015-03-02 13:04:41 +02002786 .poweroff_late = i915_pm_poweroff_late,
Chris Wilson1f19ac22016-05-14 07:26:32 +01002787 .restore_early = i915_pm_restore_early,
2788 .restore = i915_pm_restore,
Imre Deak5545dbb2014-10-23 19:23:28 +03002789
2790 /* S0ix (via runtime suspend) event handlers */
Paulo Zanoni97bea202014-03-07 20:12:33 -03002791 .runtime_suspend = intel_runtime_suspend,
2792 .runtime_resume = intel_runtime_resume,
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002793};
2794
Laurent Pinchart78b68552012-05-17 13:27:22 +02002795static const struct vm_operations_struct i915_gem_vm_ops = {
Jesse Barnesde151cf2008-11-12 10:03:55 -08002796 .fault = i915_gem_fault,
Jesse Barnesab00b3e2009-02-11 14:01:46 -08002797 .open = drm_gem_vm_open,
2798 .close = drm_gem_vm_close,
Jesse Barnesde151cf2008-11-12 10:03:55 -08002799};
2800
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002801static const struct file_operations i915_driver_fops = {
2802 .owner = THIS_MODULE,
2803 .open = drm_open,
2804 .release = drm_release,
2805 .unlocked_ioctl = drm_ioctl,
2806 .mmap = drm_gem_mmap,
2807 .poll = drm_poll,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002808 .read = drm_read,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002809 .compat_ioctl = i915_compat_ioctl,
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002810 .llseek = noop_llseek,
2811};
2812
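/*
 * The legacy GEM pin/unpin ioctls are no longer supported; always reject
 * them with -ENODEV.
 */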
Chris Wilson0673ad42016-06-24 14:00:22 +01002813static int
2814i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2815 struct drm_file *file)
2816{
2817 return -ENODEV;
2818}
2819
2820static const struct drm_ioctl_desc i915_ioctls[] = {
2821 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2822 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2823 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2824 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2825 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2826 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
Ville Syrjälä6a20fe72018-02-07 18:48:41 +02002827 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
Chris Wilson0673ad42016-06-24 14:00:22 +01002828 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2829 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2830 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2831 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2832 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2833 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2834 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2835 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2836 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2837 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2838 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
Ville Syrjälä6a20fe72018-02-07 18:48:41 +02002839 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
2840 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
Chris Wilson0673ad42016-06-24 14:00:22 +01002841 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2842 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2843 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2844 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2845 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2846 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2847 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2848 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2849 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2850 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2851 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2852 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2853 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2854 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2855 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
Chris Wilson111dbca2017-01-10 12:10:44 +00002856 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
2857 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
Chris Wilson0673ad42016-06-24 14:00:22 +01002858 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
Ville Syrjälä6a20fe72018-02-07 18:48:41 +02002859 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
Chris Wilson0673ad42016-06-24 14:00:22 +01002860 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
Daniel Vetter0cd54b02018-04-20 08:51:57 +02002861 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
2862 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
2863 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
2864 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
Chris Wilson0673ad42016-06-24 14:00:22 +01002865 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2866 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2867 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2868 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2869 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2870 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2871 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2872 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
Robert Braggeec688e2016-11-07 19:49:47 +00002873 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002874 DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
2875 DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
Lionel Landwerlina446ae22018-03-06 12:28:56 +00002876 DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
Chris Wilson0673ad42016-06-24 14:00:22 +01002877};
2878
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879static struct drm_driver driver = {
Michael Witten0c547812011-08-25 17:55:54 +00002880 /* Don't use MTRRs here; the Xserver or userspace app should
2881 * deal with them for Intel hardware.
Dave Airlie792d2b92005-11-11 23:30:27 +11002882 */
Eric Anholt673a3942008-07-30 12:06:12 -07002883 .driver_features =
Kristian Høgsberg10ba5012013-08-25 18:29:01 +02002884 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
Jason Ekstrandcf6e7ba2017-08-15 15:57:33 +01002885 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
Chris Wilsoncad36882017-02-10 16:35:21 +00002886 .release = i915_driver_release,
Eric Anholt673a3942008-07-30 12:06:12 -07002887 .open = i915_driver_open,
Dave Airlie22eae942005-11-10 22:16:34 +11002888 .lastclose = i915_driver_lastclose,
Eric Anholt673a3942008-07-30 12:06:12 -07002889 .postclose = i915_driver_postclose,
Rafael J. Wysockid8e29202010-01-09 00:45:33 +01002890
Chris Wilsonb1f788c2016-08-04 07:52:45 +01002891 .gem_close_object = i915_gem_close_object,
Chris Wilsonf0cd5182016-10-28 13:58:43 +01002892 .gem_free_object_unlocked = i915_gem_free_object,
Jesse Barnesde151cf2008-11-12 10:03:55 -08002893 .gem_vm_ops = &i915_gem_vm_ops,
Daniel Vetter1286ff72012-05-10 15:25:09 +02002894
2895 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
2896 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
2897 .gem_prime_export = i915_gem_prime_export,
2898 .gem_prime_import = i915_gem_prime_import,
2899
Dave Airlieff72145b2011-02-07 12:16:14 +10002900 .dumb_create = i915_gem_dumb_create,
Dave Airlieda6b51d2014-12-24 13:11:17 +10002901 .dumb_map_offset = i915_gem_mmap_gtt,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 .ioctls = i915_ioctls,
Chris Wilson0673ad42016-06-24 14:00:22 +01002903 .num_ioctls = ARRAY_SIZE(i915_ioctls),
Arjan van de Vene08e96d2011-10-31 07:28:57 -07002904 .fops = &i915_driver_fops,
Dave Airlie22eae942005-11-10 22:16:34 +11002905 .name = DRIVER_NAME,
2906 .desc = DRIVER_DESC,
2907 .date = DRIVER_DATE,
2908 .major = DRIVER_MAJOR,
2909 .minor = DRIVER_MINOR,
2910 .patchlevel = DRIVER_PATCHLEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911};
Chris Wilson66d9cb52017-02-13 17:15:17 +00002912
2913#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2914#include "selftests/mock_drm.c"
2915#endif