/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
#include "intel_workarounds.h"

static struct drm_driver driver;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915_modparams.inject_load_failure, func, line);
		i915_modparams.inject_load_failure = 0;
		return true;
	}

	return false;
}

bool i915_error_injected(void)
{
	return i915_load_fail_count && !i915_modparams.inject_load_failure;
}

#endif
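
/*
 * Load-failure injection is used to exercise the error-unwind paths:
 * booting with i915.inject_load_failure=N aborts driver load at the N-th
 * checkpoint. The checkpoint wrapper lives in i915_drv.h; roughly (sketch,
 * not verbatim):
 *
 *	#define i915_inject_load_failure() \
 *		__i915_inject_load_failure(__func__, __LINE__)
 *
 * which is how the callers below (e.g. i915_driver_init_early()) test it.
 */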

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

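/*
 * __i915_printk - device-prefixed printk that also nags for bug reports.
 * Error-level messages print FDO_BUG_MSG once per boot, unless the user has
 * already tainted the kernel with unsafe module parameters. Callers normally
 * reach this through a wrapper in i915_drv.h, approximately:
 *
 *	#define i915_report_error(dev_priv, fmt, ...) \
 *		__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
 */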
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_error)
		dev_printk(level, kdev, "%pV", &vaf);
	else
		dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
			   __builtin_return_address(0), &vaf);

	va_end(args);

	if (is_error && !shown_bug_once) {
		/*
		 * Ask the user to file a bug report for the error, except
		 * if they may have caused the bug by fiddling with unsafe
		 * module parameters.
		 */
		if (!test_taint(TAINT_USER))
			dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
static enum intel_pch
intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
{
	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 5));
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found CougarPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		return PCH_CPT;
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found PantherPoint PCH\n");
		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
		/* PantherPoint is CPT compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		return PCH_LPT;
	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
		/* WildcatPoint is LPT compatible */
		return PCH_LPT;
	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
		return PCH_SPT;
	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
			!IS_COFFEELAKE(dev_priv));
		return PCH_KBP;
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
		return PCH_CNP;
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		DRM_DEBUG_KMS("Found Ice Lake PCH\n");
		WARN_ON(!IS_ICELAKE(dev_priv));
		return PCH_ICP;
	default:
		return PCH_NONE;
	}
}

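/*
 * Recognise the "south bridges" that virtualization hosts advertise instead
 * of a real PCH: the P2X/P3X virtual IDs, or QEMU's ISA bridge when the
 * subsystem IDs identify it as QEMU.
 */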
static bool intel_is_virt_pch(unsigned short id,
			      unsigned short svendor, unsigned short sdevice)
{
	return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
		id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
		(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
		 svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
		 sdevice == PCI_SUBDEVICE_ID_QEMU));
}

static unsigned short
intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
{
	unsigned short id = 0;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_ICELAKE(dev_priv))
		id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
	else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
	else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
		id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
	else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
	else if (IS_GEN(dev_priv, 5))
		id = INTEL_PCH_IBX_DEVICE_ID_TYPE;

	if (id)
		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
	else
		DRM_DEBUG_KMS("Assuming no PCH\n");

	return id;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;
		enum intel_pch pch_type;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

		pch_type = intel_pch_type(dev_priv, id);
		if (pch_type != PCH_NONE) {
			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		} else if (intel_is_virt_pch(id, pch->subsystem_vendor,
					     pch->subsystem_device)) {
			id = intel_virt_detect_pch(dev_priv);
			pch_type = intel_pch_type(dev_priv, id);

			/* Sanity check virtual PCH id */
			if (WARN_ON(id && pch_type == PCH_NONE))
				id = 0;

			dev_priv->pch_type = pch_type;
			dev_priv->pch_id = id;
			break;
		}
	}

	/*
	 * Use PCH_NOP (PCH but no South Display) for PCH platforms without
	 * display.
	 */
	if (pch && !HAS_DISPLAY(dev_priv)) {
		DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
		dev_priv->pch_type = PCH_NOP;
		dev_priv->pch_id = 0;
	}

	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

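/*
 * i915_getparam_ioctl - answer userspace capability queries. Illustrative
 * userspace usage (hypothetical snippet, not part of this file):
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */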
static int i915_getparam_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS0];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS0];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS0];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS1];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915_modparams.enable_hangcheck &&
			intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = 0;
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		value = intel_huc_check_status(&dev_priv->huc);
		if (value < 0)
			return value;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->caps.scheduler;
		break;

	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_HAS_CONTEXT_ISOLATION:
		value = intel_engines_has_context_isolation(dev_priv);
		break;
	case I915_PARAM_SLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
		value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
		break;
	case I915_PARAM_MMAP_GTT_COHERENT:
		value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

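/*
 * Look up the PCI host bridge (device 0, function 0 in our PCI domain);
 * the MCHBAR setup below is done through its config space.
 */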
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; mchbar_need_disable notes if we must undo it */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

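/*
 * vga_switcheroo support for hybrid graphics: when the platform mux switches
 * GPUs, the handler below suspends or resumes the whole device through the
 * ordinary suspend/resume paths declared above.
 */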
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

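/*
 * Bring up modesetting: VGA arbitration and switcheroo, power domains, DMC
 * firmware (CSR), interrupts, gmbus, the display pipes, GEM, and finally
 * fbdev and hotplug. The cleanup_* labels unwind the same steps in reverse.
 */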
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (HAS_DISPLAY(dev_priv)) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out;
	}

	intel_bios_init(dev_priv);

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	intel_init_ipc(dev_priv);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_modeset:
	intel_modeset_cleanup(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini_hw(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

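/*
 * Evict any firmware framebuffer (e.g. efifb/vesafb) overlapping our GGTT
 * aperture, so that the generic fbdev drivers stop scanning out of memory
 * the driver is about to take over.
 */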
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	ret = i915_gem_init_early(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_wopcm_init_early(&dev_priv->wopcm);
	intel_uc_init_early(dev_priv);
	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_uc;
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_uc:
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	intel_uc_cleanup_early(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

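/*
 * Map the MMIO register BAR (BAR1 on gen2, BAR0 otherwise) and make sure
 * MCHBAR is enabled before we start poking at MCH registers.
 */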
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto err_bridge;

	intel_uncore_init(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune(dev_priv);

	intel_uc_init_mmio(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
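
/*
 * Worked example (illustrative): a dual-rank DIMM built from x8-wide DRAM
 * devices has 2 * 64 / 8 = 16 devices, as each rank spans a 64-bit bus.
 */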

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

static void
skl_dram_get_dimm_info(struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	dimm->size = skl_get_dimm_size(val);
	dimm->width = skl_get_dimm_width(val);
	dimm->ranks = skl_get_dimm_ranks(val);

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(&ch->l_info, channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(&ch->s_info, channel, 'S', val >> 16);

	if (ch->l_info.size == 0 && ch->s_info.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	if (ch->l_info.ranks == 2 || ch->s_info.ranks == 2)
		ch->ranks = 2;
	else if (ch->l_info.ranks == 1 && ch->s_info.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->l_info) ||
		skl_is_16gb_dimm(&ch->s_info);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

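/*
 * Channels count as symmetric when both channel registers match and channel
 * 0 is either L-DIMM only or has identical L and S DIMM geometry.
 */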
static bool
intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
			struct dram_channel_info *ch0)
{
	return (val_ch0 == val_ch1 &&
		(ch0->s_info.size == 0 ||
		 (ch0->l_info.size == ch0->s_info.size &&
		  ch0->l_info.width == ch0->s_info.width &&
		  ch0->l_info.ranks == ch0->s_info.ranks)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val_ch0, val_ch1;
	int ret;

	val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(&ch0, 0, val_ch0);
	if (ret == 0)
		dram_info->num_channels++;

	val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(&ch1, 1, val_ch1);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If either channel is single rank, the worst-case output will be
	 * the same as for single-rank memory, so report single rank.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
								       val_ch1,
								       &ch0);

	DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
		      dev_priv->dram_info.symmetric_memory ? "" : "not ");
	return 0;
}

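/*
 * Bandwidth sketch (illustrative numbers): each channel transfers 8 bytes,
 * so two channels at 2400 MT/s yield 2 * 2400000 * 8 = 38400000 kBps,
 * i.e. about 38.4 GB/s.
 */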
1218static int
1219skl_get_dram_info(struct drm_i915_private *dev_priv)
1220{
1221 struct dram_info *dram_info = &dev_priv->dram_info;
1222 u32 mem_freq_khz, val;
1223 int ret;
1224
1225 ret = skl_dram_get_channels_info(dev_priv);
1226 if (ret)
1227 return ret;
1228
1229 val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
1230 mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
1231 SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
1232
1233 dram_info->bandwidth_kbps = dram_info->num_channels *
1234 mem_freq_khz * 8;
1235
1236 if (dram_info->bandwidth_kbps == 0) {
1237 DRM_INFO("Couldn't get system memory bandwidth\n");
1238 return -EINVAL;
1239 }
1240
1241 dram_info->valid = true;
1242 return 0;
1243}
1244
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001245/* Returns Gb per DRAM device */
1246static int bxt_get_dimm_size(u32 val)
1247{
1248 switch (val & BXT_DRAM_SIZE_MASK) {
Ville Syrjälä88603432019-03-06 22:35:44 +02001249 case BXT_DRAM_SIZE_4GBIT:
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001250 return 4;
Ville Syrjälä88603432019-03-06 22:35:44 +02001251 case BXT_DRAM_SIZE_6GBIT:
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001252 return 6;
Ville Syrjälä88603432019-03-06 22:35:44 +02001253 case BXT_DRAM_SIZE_8GBIT:
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001254 return 8;
Ville Syrjälä88603432019-03-06 22:35:44 +02001255 case BXT_DRAM_SIZE_12GBIT:
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001256 return 12;
Ville Syrjälä88603432019-03-06 22:35:44 +02001257 case BXT_DRAM_SIZE_16GBIT:
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001258 return 16;
1259 default:
1260 MISSING_CASE(val);
1261 return 0;
1262 }
1263}
1264
1265static int bxt_get_dimm_width(u32 val)
1266{
1267 if (!bxt_get_dimm_size(val))
1268 return 0;
1269
1270 val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
1271
1272 return 8 << val;
1273}
1274
1275static int bxt_get_dimm_ranks(u32 val)
1276{
1277 if (!bxt_get_dimm_size(val))
1278 return 0;
1279
1280 switch (val & BXT_DRAM_RANK_MASK) {
1281 case BXT_DRAM_RANK_SINGLE:
1282 return 1;
1283 case BXT_DRAM_RANK_DUAL:
1284 return 2;
1285 default:
1286 MISSING_CASE(val);
1287 return 0;
1288 }
1289}
1290
1291static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
1292 u32 val)
1293{
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001294 dimm->width = bxt_get_dimm_width(val);
1295 dimm->ranks = bxt_get_dimm_ranks(val);
Ville Syrjälä88603432019-03-06 22:35:44 +02001296
1297 /*
1298 * Size in register is Gb per DRAM device. Convert to total
1299 * GB to match the way we report this for non-LP platforms.
1300 */
1301 dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001302}
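/*
 * Editor's worked example (illustrative only): for a hypothetical DIMM
 * built from 8 Gbit devices, if intel_dimm_num_devices() were to report
 * eight devices, the conversion above yields 8 Gbit * 8 / 8 = 8 GB total.
 */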
1303
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301304static int
1305bxt_get_dram_info(struct drm_i915_private *dev_priv)
1306{
1307 struct dram_info *dram_info = &dev_priv->dram_info;
1308 u32 dram_channels;
1309 u32 mem_freq_khz, val;
1310 u8 num_active_channels;
1311 int i;
1312
1313 val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
1314 mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
1315 BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
1316
1317 dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
1318 num_active_channels = hweight32(dram_channels);
1319
1320	/* Each active bit represents a 4-byte-wide channel */
1321 dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
1322
1323 if (dram_info->bandwidth_kbps == 0) {
1324 DRM_INFO("Couldn't get system memory bandwidth\n");
1325 return -EINVAL;
1326 }
1327
1328 /*
1329	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
1330 */
1331 for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001332 struct dram_dimm_info dimm;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301333
1334 val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
1335 if (val == 0xFFFFFFFF)
1336 continue;
1337
1338 dram_info->num_channels++;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301339
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001340 bxt_get_dimm_info(&dimm, val);
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301341
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001342 DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u\n",
1343 i - BXT_D_CR_DRP0_DUNIT_START,
1344 dimm.size, dimm.width, dimm.ranks);
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301345
1346 /*
1347	 * If any channel is a single-rank channel, the worst-case
1348	 * behaviour is the same as for single-rank memory, so
1349	 * report single-rank memory.
1350 */
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001351 if (dram_info->ranks == 0)
Ville Syrjäläa62819a2019-03-06 22:35:43 +02001352 dram_info->ranks = dimm.ranks;
1353 else if (dimm.ranks == 1)
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001354 dram_info->ranks = 1;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301355 }
1356
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001357 if (dram_info->ranks == 0) {
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301358 DRM_INFO("couldn't get memory rank information\n");
1359 return -EINVAL;
1360 }
1361
1362 dram_info->valid = true;
1363 return 0;
1364}
1365
1366static void
1367intel_get_dram_info(struct drm_i915_private *dev_priv)
1368{
1369 struct dram_info *dram_info = &dev_priv->dram_info;
Mahesh Kumar5771caf2018-08-24 15:02:22 +05301370 char bandwidth_str[32];
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301371 int ret;
1372
1373 dram_info->valid = false;
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001374 dram_info->ranks = 0;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301375 dram_info->bandwidth_kbps = 0;
1376 dram_info->num_channels = 0;
1377
Ville Syrjälä5d6f36b2018-10-23 21:21:02 +03001378 /*
1379 * Assume 16Gb DIMMs are present until proven otherwise.
1380 * This is only used for the level 0 watermark latency
1381 * w/a which does not apply to bxt/glk.
1382 */
1383 dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
1384
Ville Syrjälä331ecde2019-03-06 22:35:45 +02001385 if (INTEL_GEN(dev_priv) < 9)
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301386 return;
1387
Mahesh Kumar5771caf2018-08-24 15:02:22 +05301388 /* Need to calculate bandwidth only for Gen9 */
Ville Syrjälä331ecde2019-03-06 22:35:45 +02001389 if (IS_GEN9_LP(dev_priv))
Mahesh Kumar5771caf2018-08-24 15:02:22 +05301390 ret = bxt_get_dram_info(dev_priv);
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001391 else if (IS_GEN(dev_priv, 9))
Mahesh Kumar5771caf2018-08-24 15:02:22 +05301392 ret = skl_get_dram_info(dev_priv);
1393 else
1394 ret = skl_dram_get_channels_info(dev_priv);
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301395 if (ret)
1396 return;
1397
Mahesh Kumar5771caf2018-08-24 15:02:22 +05301398 if (dram_info->bandwidth_kbps)
1399 sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
1400 else
1401 sprintf(bandwidth_str, "unknown");
1402	DRM_DEBUG_KMS("DRAM bandwidth: %s, total-channels: %u\n",
1403 bandwidth_str, dram_info->num_channels);
Ville Syrjälä54561b22019-03-06 22:35:42 +02001404 DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001405 dram_info->ranks, yesno(dram_info->is_16gb_dimm));
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301406}
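/*
 * Editor's summary of the dispatch above: gen9 LP (bxt/glk) takes the
 * per-DUNIT path, other gen9 parts take the SKL MCHBAR path including the
 * bandwidth calculation, and gen10+ only gathers channel/rank/DIMM info.
 */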
1407
Chris Wilson0673ad42016-06-24 14:00:22 +01001408/**
1409 * i915_driver_init_hw - setup state requiring device access
1410 * @dev_priv: device private
1411 *
1412 * Setup state that requires accessing the device, but doesn't require
1413 * exposing the driver via kernel internal or userspace interfaces.
1414 */
1415static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1416{
David Weinehall52a05c32016-08-22 13:32:44 +03001417 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001418 int ret;
1419
1420 if (i915_inject_load_failure())
1421 return -ENODEV;
1422
Jani Nikula1400cc72018-12-31 16:56:43 +02001423 intel_device_info_runtime_init(dev_priv);
Chris Wilson94b4f3b2016-07-05 10:40:20 +01001424
Chris Wilson4bdafb92018-09-26 21:12:22 +01001425 if (HAS_PPGTT(dev_priv)) {
1426 if (intel_vgpu_active(dev_priv) &&
1427 !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
1428 i915_report_error(dev_priv,
1429 "incompatible vGPU found, support for isolated ppGTT required\n");
1430 return -ENXIO;
1431 }
1432 }
1433
Chris Wilson46592892018-11-30 12:59:54 +00001434 if (HAS_EXECLISTS(dev_priv)) {
1435 /*
1436 * Older GVT emulation depends upon intercepting CSB mmio,
1437 * which we no longer use, preferring to use the HWSP cache
1438 * instead.
1439 */
1440 if (intel_vgpu_active(dev_priv) &&
1441 !intel_vgpu_has_hwsp_emulation(dev_priv)) {
1442 i915_report_error(dev_priv,
1443 "old vGPU host found, support for HWSP emulation required\n");
1444 return -ENXIO;
1445 }
1446 }
1447
Chris Wilson94b4f3b2016-07-05 10:40:20 +01001448 intel_sanitize_options(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001449
Lionel Landwerlin9f9b2792017-10-27 15:59:31 +01001450 i915_perf_init(dev_priv);
1451
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001452 ret = i915_ggtt_probe_hw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001453 if (ret)
Chris Wilson9f172f62018-04-14 10:12:33 +01001454 goto err_perf;
Chris Wilson0673ad42016-06-24 14:00:22 +01001455
Chris Wilson9f172f62018-04-14 10:12:33 +01001456 /*
1457 * WARNING: Apparently we must kick fbdev drivers before vgacon,
1458 * otherwise the vga fbdev driver falls over.
1459 */
Chris Wilson0673ad42016-06-24 14:00:22 +01001460 ret = i915_kick_out_firmware_fb(dev_priv);
1461 if (ret) {
1462 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
Chris Wilson9f172f62018-04-14 10:12:33 +01001463 goto err_ggtt;
Chris Wilson0673ad42016-06-24 14:00:22 +01001464 }
1465
1466 ret = i915_kick_out_vgacon(dev_priv);
1467 if (ret) {
1468 DRM_ERROR("failed to remove conflicting VGA console\n");
Chris Wilson9f172f62018-04-14 10:12:33 +01001469 goto err_ggtt;
Chris Wilson0673ad42016-06-24 14:00:22 +01001470 }
1471
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001472 ret = i915_ggtt_init_hw(dev_priv);
Chris Wilson0088e522016-08-04 07:52:21 +01001473 if (ret)
Chris Wilson9f172f62018-04-14 10:12:33 +01001474 goto err_ggtt;
Chris Wilson0088e522016-08-04 07:52:21 +01001475
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001476 ret = i915_ggtt_enable_hw(dev_priv);
Chris Wilson0088e522016-08-04 07:52:21 +01001477 if (ret) {
1478 DRM_ERROR("failed to enable GGTT\n");
Chris Wilson9f172f62018-04-14 10:12:33 +01001479 goto err_ggtt;
Chris Wilson0088e522016-08-04 07:52:21 +01001480 }
1481
David Weinehall52a05c32016-08-22 13:32:44 +03001482 pci_set_master(pdev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001483
1484 /* overlay on gen2 is broken and can't address above 1G */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001485 if (IS_GEN(dev_priv, 2)) {
David Weinehall52a05c32016-08-22 13:32:44 +03001486 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
Chris Wilson0673ad42016-06-24 14:00:22 +01001487 if (ret) {
1488 DRM_ERROR("failed to set DMA mask\n");
1489
Chris Wilson9f172f62018-04-14 10:12:33 +01001490 goto err_ggtt;
Chris Wilson0673ad42016-06-24 14:00:22 +01001491 }
1492 }
1493
Chris Wilson0673ad42016-06-24 14:00:22 +01001494 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1495 * using 32bit addressing, overwriting memory if HWS is located
1496 * above 4GB.
1497 *
1498 * The documentation also mentions an issue with undefined
1499 * behaviour if any general state is accessed within a page above 4GB,
1500 * which also needs to be handled carefully.
1501 */
Jani Nikulac0f86832016-12-07 12:13:04 +02001502 if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
David Weinehall52a05c32016-08-22 13:32:44 +03001503 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
Chris Wilson0673ad42016-06-24 14:00:22 +01001504
1505 if (ret) {
1506 DRM_ERROR("failed to set DMA mask\n");
1507
Chris Wilson9f172f62018-04-14 10:12:33 +01001508 goto err_ggtt;
Chris Wilson0673ad42016-06-24 14:00:22 +01001509 }
1510 }
1511
Chris Wilson0673ad42016-06-24 14:00:22 +01001512 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1513 PM_QOS_DEFAULT_VALUE);
1514
1515 intel_uncore_sanitize(dev_priv);
1516
Tvrtko Ursulin25d140f2018-12-03 13:33:19 +00001517 intel_gt_init_workarounds(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001518 i915_gem_load_init_fences(dev_priv);
1519
1520 /* On the 945G/GM, the chipset reports the MSI capability on the
1521 * integrated graphics even though the support isn't actually there
1522 * according to the published specs. It doesn't appear to function
1523 * correctly in testing on 945G.
1524 * This may be a side effect of MSI having been made available for PEG
1525 * and the registers being closely associated.
1526 *
1527 * According to chipset errata, on the 965GM, MSI interrupts may
Ville Syrjäläe38c2da2017-06-26 23:30:51 +03001528	 * be lost or delayed, and MSI was defeatured there. MSI interrupts seem to
1529 * get lost on g4x as well, and interrupt delivery seems to stay
1530 * properly dead afterwards. So we'll just disable them for all
1531 * pre-gen5 chipsets.
Lucas De Marchi8a29c772018-05-23 11:04:35 -07001532 *
1533	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
1534 * interrupts even when in MSI mode. This results in spurious
1535 * interrupt warnings if the legacy irq no. is shared with another
1536 * device. The kernel then disables that interrupt source and so
1537 * prevents the other device from working properly.
Chris Wilson0673ad42016-06-24 14:00:22 +01001538 */
Ville Syrjäläe38c2da2017-06-26 23:30:51 +03001539 if (INTEL_GEN(dev_priv) >= 5) {
David Weinehall52a05c32016-08-22 13:32:44 +03001540 if (pci_enable_msi(pdev) < 0)
Chris Wilson0673ad42016-06-24 14:00:22 +01001541 DRM_DEBUG_DRIVER("can't enable MSI");
1542 }
1543
Zhenyu Wang26f837e2017-01-13 10:46:09 +08001544 ret = intel_gvt_init(dev_priv);
1545 if (ret)
Chris Wilson7ab87ed2018-07-10 15:38:21 +01001546 goto err_msi;
1547
1548 intel_opregion_setup(dev_priv);
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301549 /*
1550	 * Fill the dram structure with the system's raw bandwidth and
1551	 * DRAM info. This will be used for memory latency calculations.
1552 */
1553 intel_get_dram_info(dev_priv);
1554
Chris Wilson0673ad42016-06-24 14:00:22 +01001556 return 0;
1557
Chris Wilson7ab87ed2018-07-10 15:38:21 +01001558err_msi:
1559 if (pdev->msi_enabled)
1560 pci_disable_msi(pdev);
1561 pm_qos_remove_request(&dev_priv->pm_qos);
Chris Wilson9f172f62018-04-14 10:12:33 +01001562err_ggtt:
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001563 i915_ggtt_cleanup_hw(dev_priv);
Chris Wilson9f172f62018-04-14 10:12:33 +01001564err_perf:
1565 i915_perf_fini(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001566 return ret;
1567}
1568
1569/**
1570 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1571 * @dev_priv: device private
1572 */
1573static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1574{
David Weinehall52a05c32016-08-22 13:32:44 +03001575 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001576
Lionel Landwerlin9f9b2792017-10-27 15:59:31 +01001577 i915_perf_fini(dev_priv);
1578
David Weinehall52a05c32016-08-22 13:32:44 +03001579 if (pdev->msi_enabled)
1580 pci_disable_msi(pdev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001581
1582 pm_qos_remove_request(&dev_priv->pm_qos);
Chris Wilson97d6d7a2016-08-04 07:52:22 +01001583 i915_ggtt_cleanup_hw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001584}
1585
1586/**
1587 * i915_driver_register - register the driver with the rest of the system
1588 * @dev_priv: device private
1589 *
1590 * Perform any steps necessary to make the driver available via kernel
1591 * internal or userspace interfaces.
1592 */
1593static void i915_driver_register(struct drm_i915_private *dev_priv)
1594{
Chris Wilson91c8a322016-07-05 10:40:23 +01001595 struct drm_device *dev = &dev_priv->drm;
Chris Wilson0673ad42016-06-24 14:00:22 +01001596
Chris Wilson848b3652017-11-23 11:53:37 +00001597 i915_gem_shrinker_register(dev_priv);
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001598 i915_pmu_register(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001599
1600 /*
1601 * Notify a valid surface after modesetting,
1602 * when running inside a VM.
1603 */
1604 if (intel_vgpu_active(dev_priv))
1605 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1606
1607 /* Reveal our presence to userspace */
1608 if (drm_dev_register(dev, 0) == 0) {
1609 i915_debugfs_register(dev_priv);
David Weinehall694c2822016-08-22 13:32:43 +03001610 i915_setup_sysfs(dev_priv);
Robert Bragg442b8c02016-11-07 19:49:53 +00001611
1612 /* Depends on sysfs having been initialized */
1613 i915_perf_register(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001614 } else
1615 DRM_ERROR("Failed to register driver for userspace access!\n");
1616
José Roberto de Souzae1bf0942018-11-30 15:20:47 -08001617 if (HAS_DISPLAY(dev_priv)) {
Chris Wilson0673ad42016-06-24 14:00:22 +01001618 /* Must be done after probing outputs */
1619 intel_opregion_register(dev_priv);
1620 acpi_video_register();
1621 }
1622
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001623 if (IS_GEN(dev_priv, 5))
Chris Wilson0673ad42016-06-24 14:00:22 +01001624 intel_gpu_ips_init(dev_priv);
1625
Jerome Anandeef57322017-01-25 04:27:49 +05301626 intel_audio_init(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001627
1628 /*
1629 * Some ports require correctly set-up hpd registers for detection to
1630 * work properly (leading to ghost connected connector status), e.g. VGA
1631 * on gm45. Hence we can only set up the initial fbdev config after hpd
1632 * irqs are fully enabled. We do it last so that the async config
1633 * cannot run before the connectors are registered.
1634 */
1635 intel_fbdev_initial_config_async(dev);
Chris Wilson448aa912017-11-28 11:01:47 +00001636
1637 /*
1638 * We need to coordinate the hotplugs with the asynchronous fbdev
1639 * configuration, for which we use the fbdev->async_cookie.
1640 */
José Roberto de Souzae1bf0942018-11-30 15:20:47 -08001641 if (HAS_DISPLAY(dev_priv))
Chris Wilson448aa912017-11-28 11:01:47 +00001642 drm_kms_helper_poll_init(dev);
Chris Wilson07d80572018-08-16 15:37:56 +03001643
Imre Deak2cd9a682018-08-16 15:37:57 +03001644 intel_power_domains_enable(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001645 intel_runtime_pm_enable(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001646}
1647
1648/**
1649	 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1650 * @dev_priv: device private
1651 */
1652static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1653{
Chris Wilson07d80572018-08-16 15:37:56 +03001654 intel_runtime_pm_disable(dev_priv);
Imre Deak2cd9a682018-08-16 15:37:57 +03001655 intel_power_domains_disable(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001656
Daniel Vetter4f256d82017-07-15 00:46:55 +02001657 intel_fbdev_unregister(dev_priv);
Jerome Anandeef57322017-01-25 04:27:49 +05301658 intel_audio_deinit(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001659
Chris Wilson448aa912017-11-28 11:01:47 +00001660 /*
1661 * After flushing the fbdev (incl. a late async config which will
1662 * have delayed queuing of a hotplug event), then flush the hotplug
1663 * events.
1664 */
1665 drm_kms_helper_poll_fini(&dev_priv->drm);
1666
Chris Wilson0673ad42016-06-24 14:00:22 +01001667 intel_gpu_ips_teardown();
1668 acpi_video_unregister();
1669 intel_opregion_unregister(dev_priv);
1670
Robert Bragg442b8c02016-11-07 19:49:53 +00001671 i915_perf_unregister(dev_priv);
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001672 i915_pmu_unregister(dev_priv);
Robert Bragg442b8c02016-11-07 19:49:53 +00001673
David Weinehall694c2822016-08-22 13:32:43 +03001674 i915_teardown_sysfs(dev_priv);
Chris Wilson91c8a322016-07-05 10:40:23 +01001675 drm_dev_unregister(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001676
Chris Wilson848b3652017-11-23 11:53:37 +00001677 i915_gem_shrinker_unregister(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001678}
1679
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001680static void i915_welcome_messages(struct drm_i915_private *dev_priv)
1681{
1682 if (drm_debug & DRM_UT_DRIVER) {
1683 struct drm_printer p = drm_debug_printer("i915 device info:");
1684
Jani Nikula1787a982018-12-31 16:56:45 +02001685 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
1686 INTEL_DEVID(dev_priv),
1687 INTEL_REVID(dev_priv),
1688 intel_platform_name(INTEL_INFO(dev_priv)->platform),
1689 INTEL_GEN(dev_priv));
1690
1691 intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
Jani Nikula02584042018-12-31 16:56:41 +02001692 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001693 }
1694
1695 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1696 DRM_INFO("DRM_I915_DEBUG enabled\n");
1697 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1698 DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
Imre Deak6dfc4a82018-08-16 22:34:14 +03001699 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
1700 DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001701}
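/*
 * Editor's note: the banner above is only emitted when DRM driver debugging
 * is enabled, e.g. by booting with drm.debug=0x2 (DRM_UT_DRIVER).
 */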
1702
Chris Wilson55ac5a12018-09-05 15:09:20 +01001703static struct drm_i915_private *
1704i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
1705{
1706 const struct intel_device_info *match_info =
1707 (struct intel_device_info *)ent->driver_data;
1708 struct intel_device_info *device_info;
1709 struct drm_i915_private *i915;
Andi Shyti2ddcc982018-10-02 12:20:47 +03001710 int err;
Chris Wilson55ac5a12018-09-05 15:09:20 +01001711
1712 i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
1713 if (!i915)
Andi Shyti2ddcc982018-10-02 12:20:47 +03001714 return ERR_PTR(-ENOMEM);
Chris Wilson55ac5a12018-09-05 15:09:20 +01001715
Andi Shyti2ddcc982018-10-02 12:20:47 +03001716 err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
1717 if (err) {
Chris Wilson55ac5a12018-09-05 15:09:20 +01001718 kfree(i915);
Andi Shyti2ddcc982018-10-02 12:20:47 +03001719 return ERR_PTR(err);
Chris Wilson55ac5a12018-09-05 15:09:20 +01001720 }
1721
1722 i915->drm.pdev = pdev;
1723 i915->drm.dev_private = i915;
1724 pci_set_drvdata(pdev, &i915->drm);
1725
1726 /* Setup the write-once "constant" device info */
1727 device_info = mkwrite_device_info(i915);
1728 memcpy(device_info, match_info, sizeof(*device_info));
Jani Nikula02584042018-12-31 16:56:41 +02001729 RUNTIME_INFO(i915)->device_id = pdev->device;
Chris Wilson55ac5a12018-09-05 15:09:20 +01001730
1731 BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
Chris Wilson74f6e182018-09-26 11:47:07 +01001732 BITS_PER_TYPE(device_info->platform_mask));
1733 BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
Chris Wilson55ac5a12018-09-05 15:09:20 +01001734
1735 return i915;
1736}
1737
Chris Wilson31962ca2018-09-05 15:09:21 +01001738static void i915_driver_destroy(struct drm_i915_private *i915)
1739{
1740 struct pci_dev *pdev = i915->drm.pdev;
1741
1742 drm_dev_fini(&i915->drm);
1743 kfree(i915);
1744
1745 /* And make sure we never chase our dangling pointer from pci_dev */
1746 pci_set_drvdata(pdev, NULL);
1747}
1748
Chris Wilson0673ad42016-06-24 14:00:22 +01001749/**
1750 * i915_driver_load - setup chip and create an initial config
Joonas Lahtinend2ad3ae2016-11-10 15:36:34 +02001751 * @pdev: PCI device
1752 * @ent: matching PCI ID entry
Chris Wilson0673ad42016-06-24 14:00:22 +01001753 *
1754 * The driver load routine has to do several things:
1755 * - drive output discovery via intel_modeset_init()
1756 * - initialize the memory manager
1757 * - allocate initial config memory
1758 * - setup the DRM framebuffer with the allocated memory
1759 */
Chris Wilson42f55512016-06-24 14:00:26 +01001760int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
Chris Wilson0673ad42016-06-24 14:00:22 +01001761{
Maarten Lankhorst8d2b47d2017-02-02 08:41:42 +01001762 const struct intel_device_info *match_info =
1763 (struct intel_device_info *)ent->driver_data;
Chris Wilson0673ad42016-06-24 14:00:22 +01001764 struct drm_i915_private *dev_priv;
1765 int ret;
1766
Chris Wilson55ac5a12018-09-05 15:09:20 +01001767 dev_priv = i915_driver_create(pdev, ent);
Andi Shyti2ddcc982018-10-02 12:20:47 +03001768 if (IS_ERR(dev_priv))
1769 return PTR_ERR(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001770
Ville Syrjälä1feb64c2018-09-13 16:16:22 +03001771 /* Disable nuclear pageflip by default on pre-ILK */
1772 if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
1773 dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
1774
Chris Wilson0673ad42016-06-24 14:00:22 +01001775 ret = pci_enable_device(pdev);
1776 if (ret)
Chris Wilsoncad36882017-02-10 16:35:21 +00001777 goto out_fini;
Chris Wilson0673ad42016-06-24 14:00:22 +01001778
Chris Wilson55ac5a12018-09-05 15:09:20 +01001779 ret = i915_driver_init_early(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001780 if (ret < 0)
1781 goto out_pci_disable;
1782
Imre Deak2cd9a682018-08-16 15:37:57 +03001783 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001784
1785 ret = i915_driver_init_mmio(dev_priv);
1786 if (ret < 0)
1787 goto out_runtime_pm_put;
1788
1789 ret = i915_driver_init_hw(dev_priv);
1790 if (ret < 0)
1791 goto out_cleanup_mmio;
1792
Chris Wilson91c8a322016-07-05 10:40:23 +01001793 ret = i915_load_modeset_init(&dev_priv->drm);
Chris Wilson0673ad42016-06-24 14:00:22 +01001794 if (ret < 0)
Daniel Vetterbaf54382017-06-21 10:28:41 +02001795 goto out_cleanup_hw;
Chris Wilson0673ad42016-06-24 14:00:22 +01001796
1797 i915_driver_register(dev_priv);
1798
Imre Deak2cd9a682018-08-16 15:37:57 +03001799 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001800
Michal Wajdeczko27d558a2017-12-21 21:57:35 +00001801 i915_welcome_messages(dev_priv);
1802
Chris Wilson0673ad42016-06-24 14:00:22 +01001803 return 0;
1804
Chris Wilson0673ad42016-06-24 14:00:22 +01001805out_cleanup_hw:
1806 i915_driver_cleanup_hw(dev_priv);
1807out_cleanup_mmio:
1808 i915_driver_cleanup_mmio(dev_priv);
1809out_runtime_pm_put:
Imre Deak2cd9a682018-08-16 15:37:57 +03001810 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001811 i915_driver_cleanup_early(dev_priv);
1812out_pci_disable:
1813 pci_disable_device(pdev);
Chris Wilsoncad36882017-02-10 16:35:21 +00001814out_fini:
Chris Wilson0673ad42016-06-24 14:00:22 +01001815 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
Chris Wilson31962ca2018-09-05 15:09:21 +01001816 i915_driver_destroy(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001817 return ret;
1818}
1819
Chris Wilson42f55512016-06-24 14:00:26 +01001820void i915_driver_unload(struct drm_device *dev)
Chris Wilson0673ad42016-06-24 14:00:22 +01001821{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001822 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001823 struct pci_dev *pdev = dev_priv->drm.pdev;
Chris Wilson0673ad42016-06-24 14:00:22 +01001824
Imre Deak2cd9a682018-08-16 15:37:57 +03001825 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilson07d80572018-08-16 15:37:56 +03001826
Daniel Vetter99c539b2017-07-15 00:46:56 +02001827 i915_driver_unregister(dev_priv);
1828
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001829 /* Flush any external code that still may be under the RCU lock */
1830 synchronize_rcu();
1831
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001832 if (i915_gem_suspend(dev_priv))
Chris Wilson42f55512016-06-24 14:00:26 +01001833 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
Chris Wilson0673ad42016-06-24 14:00:22 +01001834
Daniel Vetter18dddad2017-03-21 17:41:49 +01001835 drm_atomic_helper_shutdown(dev);
Maarten Lankhorsta667fb42016-12-15 15:29:44 +01001836
Zhenyu Wang26f837e2017-01-13 10:46:09 +08001837 intel_gvt_cleanup(dev_priv);
1838
Chris Wilson0673ad42016-06-24 14:00:22 +01001839 intel_modeset_cleanup(dev);
1840
Hans de Goede785f0762018-02-14 09:21:49 +01001841 intel_bios_cleanup(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001842
David Weinehall52a05c32016-08-22 13:32:44 +03001843 vga_switcheroo_unregister_client(pdev);
1844 vga_client_register(pdev, NULL, NULL, NULL);
Chris Wilson0673ad42016-06-24 14:00:22 +01001845
1846 intel_csr_ucode_fini(dev_priv);
1847
1848 /* Free error state after interrupts are fully disabled. */
1849 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001850 i915_reset_error_state(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001851
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01001852 i915_gem_fini(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001853
Imre Deak48a287e2018-08-06 12:58:35 +03001854 intel_power_domains_fini_hw(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001855
1856 i915_driver_cleanup_hw(dev_priv);
1857 i915_driver_cleanup_mmio(dev_priv);
1858
Imre Deak2cd9a682018-08-16 15:37:57 +03001859 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonbd780f32019-01-14 14:21:09 +00001860 intel_runtime_pm_cleanup(dev_priv);
Chris Wilsoncad36882017-02-10 16:35:21 +00001861}
1862
1863static void i915_driver_release(struct drm_device *dev)
1864{
1865 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001866
1867 i915_driver_cleanup_early(dev_priv);
Chris Wilson31962ca2018-09-05 15:09:21 +01001868 i915_driver_destroy(dev_priv);
Chris Wilson0673ad42016-06-24 14:00:22 +01001869}
1870
1871static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1872{
Chris Wilson829a0af2017-06-20 12:05:45 +01001873 struct drm_i915_private *i915 = to_i915(dev);
Chris Wilson0673ad42016-06-24 14:00:22 +01001874 int ret;
1875
Chris Wilson829a0af2017-06-20 12:05:45 +01001876 ret = i915_gem_open(i915, file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001877 if (ret)
1878 return ret;
1879
1880 return 0;
1881}
1882
1883/**
1884 * i915_driver_lastclose - clean up after all DRM clients have exited
1885 * @dev: DRM device
1886 *
1887 * Take care of cleaning up after all DRM clients have exited. In the
1888 * mode setting case, we want to restore the kernel's initial mode (just
1889 * in case the last client left us in a bad state).
1890 *
1891 * Additionally, in the non-mode setting case, we'll tear down the GTT
1892	 * and DMA structures, since the kernel won't be using them, and clean
1893 * up any GEM state.
1894 */
1895static void i915_driver_lastclose(struct drm_device *dev)
1896{
1897 intel_fbdev_restore_mode(dev);
1898 vga_switcheroo_process_delayed_switch();
1899}
1900
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001901static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
Chris Wilson0673ad42016-06-24 14:00:22 +01001902{
Daniel Vetter7d2ec882017-03-08 15:12:45 +01001903 struct drm_i915_file_private *file_priv = file->driver_priv;
1904
Chris Wilson0673ad42016-06-24 14:00:22 +01001905 mutex_lock(&dev->struct_mutex);
Chris Wilson829a0af2017-06-20 12:05:45 +01001906 i915_gem_context_close(file);
Chris Wilson0673ad42016-06-24 14:00:22 +01001907 i915_gem_release(dev, file);
1908 mutex_unlock(&dev->struct_mutex);
Chris Wilson0673ad42016-06-24 14:00:22 +01001909
1910 kfree(file_priv);
1911}
1912
Imre Deak07f9cd02014-08-18 14:42:45 +03001913static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1914{
Chris Wilson91c8a322016-07-05 10:40:23 +01001915 struct drm_device *dev = &dev_priv->drm;
Jani Nikula19c80542015-12-16 12:48:16 +02001916 struct intel_encoder *encoder;
Imre Deak07f9cd02014-08-18 14:42:45 +03001917
1918 drm_modeset_lock_all(dev);
Jani Nikula19c80542015-12-16 12:48:16 +02001919 for_each_intel_encoder(dev, encoder)
1920 if (encoder->suspend)
1921 encoder->suspend(encoder);
Imre Deak07f9cd02014-08-18 14:42:45 +03001922 drm_modeset_unlock_all(dev);
1923}
1924
Paulo Zanoni1a5df182014-10-27 17:54:32 -02001925static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1926 bool rpm_resume);
Imre Deak507e1262016-04-20 20:27:54 +03001927static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
Suketu Shahf75a1982015-04-16 14:22:11 +05301928
Imre Deakbc872292015-11-18 17:32:30 +02001929static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1930{
1931#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1932 if (acpi_target_system_state() < ACPI_STATE_S3)
1933 return true;
1934#endif
1935 return false;
1936}
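/*
 * Editor's note: suspend_to_idle() reports whether the platform is headed
 * for s2idle rather than S3; i915_drm_suspend() uses it below to pick
 * PCI_D1 instead of PCI_D3cold as the opregion target state.
 */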
Sagar Kambleebc32822014-08-13 23:07:05 +05301937
Chris Wilson73b66f82018-05-25 10:26:29 +01001938static int i915_drm_prepare(struct drm_device *dev)
1939{
1940 struct drm_i915_private *i915 = to_i915(dev);
1941 int err;
1942
1943 /*
1944 * NB intel_display_suspend() may issue new requests after we've
1945 * ostensibly marked the GPU as ready-to-sleep here. We need to
1946	 * split out that work and pull it forward so that after this point,
1947 * the GPU is not woken again.
1948 */
1949 err = i915_gem_suspend(i915);
1950 if (err)
1951 dev_err(&i915->drm.pdev->dev,
1952 "GEM idle failed, suspend/resume might fail\n");
1953
1954 return err;
1955}
1956
Imre Deak5e365c32014-10-23 19:23:25 +03001957static int i915_drm_suspend(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001958{
Chris Wilsonfac5e232016-07-04 11:34:36 +01001959 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03001960 struct pci_dev *pdev = dev_priv->drm.pdev;
Jesse Barnese5747e32014-06-12 08:35:47 -07001961 pci_power_t opregion_target_state;
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01001962
Imre Deak1f814da2015-12-16 02:52:19 +02001963 disable_rpm_wakeref_asserts(dev_priv);
1964
Paulo Zanonic67a4702013-08-19 13:18:09 -03001965	/* We do a lot of poking in a lot of registers, so make sure they work
1966 * properly. */
Imre Deak2cd9a682018-08-16 15:37:57 +03001967 intel_power_domains_disable(dev_priv);
Paulo Zanonicb107992013-01-25 16:59:15 -02001968
Dave Airlie5bcf7192010-12-07 09:20:40 +10001969 drm_kms_helper_poll_disable(dev);
1970
David Weinehall52a05c32016-08-22 13:32:44 +03001971 pci_save_state(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001972
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02001973 intel_display_suspend(dev);
Daniel Vetterd5818932015-02-23 12:03:26 +01001974
Ville Syrjälä1a4313d2018-07-05 19:43:52 +03001975 intel_dp_mst_suspend(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001976
1977 intel_runtime_pm_disable_interrupts(dev_priv);
1978 intel_hpd_cancel_work(dev_priv);
1979
1980 intel_suspend_encoders(dev_priv);
1981
Ville Syrjälä712bf362016-10-31 22:37:23 +02001982 intel_suspend_hw(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001983
Tvrtko Ursulin275a9912016-11-16 08:55:34 +00001984 i915_gem_suspend_gtt_mappings(dev_priv);
Ben Widawsky828c7902013-10-16 09:21:30 -07001985
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00001986 i915_save_state(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001987
Imre Deakbc872292015-11-18 17:32:30 +02001988 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
Chris Wilsona950adc2018-10-30 11:05:54 +00001989 intel_opregion_suspend(dev_priv, opregion_target_state);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001990
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001991 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
Dave Airlie3fa016a2012-03-28 10:48:49 +01001992
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001993 dev_priv->suspend_count++;
1994
Imre Deakf74ed082016-04-18 14:48:21 +03001995 intel_csr_ucode_suspend(dev_priv);
Imre Deakf514c2d2015-10-28 23:59:06 +02001996
Imre Deak1f814da2015-12-16 02:52:19 +02001997 enable_rpm_wakeref_asserts(dev_priv);
1998
Chris Wilson73b66f82018-05-25 10:26:29 +01001999 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002000}
2001
Imre Deak2cd9a682018-08-16 15:37:57 +03002002static enum i915_drm_suspend_mode
2003get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
2004{
2005 if (hibernate)
2006 return I915_DRM_SUSPEND_HIBERNATE;
2007
2008 if (suspend_to_idle(dev_priv))
2009 return I915_DRM_SUSPEND_IDLE;
2010
2011 return I915_DRM_SUSPEND_MEM;
2012}
2013
David Weinehallc49d13e2016-08-22 13:32:42 +03002014static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
Imre Deakc3c09c92014-10-23 19:23:15 +03002015{
David Weinehallc49d13e2016-08-22 13:32:42 +03002016 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03002017 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deakc3c09c92014-10-23 19:23:15 +03002018 int ret;
2019
Imre Deak1f814da2015-12-16 02:52:19 +02002020 disable_rpm_wakeref_asserts(dev_priv);
2021
Chris Wilsonec92ad02018-05-31 09:22:46 +01002022 i915_gem_suspend_late(dev_priv);
2023
Chris Wilsonec92ad02018-05-31 09:22:46 +01002024 intel_uncore_suspend(dev_priv);
Imre Deak4c494a52016-10-13 14:34:06 +03002025
Imre Deak2cd9a682018-08-16 15:37:57 +03002026 intel_power_domains_suspend(dev_priv,
2027 get_suspend_mode(dev_priv, hibernation));
Imre Deak73dfc222015-11-17 17:33:53 +02002028
Imre Deak507e1262016-04-20 20:27:54 +03002029 ret = 0;
Anusha Srivatsa3b6ac432018-10-31 13:27:26 -07002030 if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03002031 bxt_enable_dc9(dev_priv);
Imre Deakb8aea3d12016-04-20 20:27:55 +03002032 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak507e1262016-04-20 20:27:54 +03002033 hsw_enable_pc8(dev_priv);
2034 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2035 ret = vlv_suspend_complete(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03002036
2037 if (ret) {
2038 DRM_ERROR("Suspend complete failed: %d\n", ret);
Imre Deak2cd9a682018-08-16 15:37:57 +03002039 intel_power_domains_resume(dev_priv);
Imre Deakc3c09c92014-10-23 19:23:15 +03002040
Imre Deak1f814da2015-12-16 02:52:19 +02002041 goto out;
Imre Deakc3c09c92014-10-23 19:23:15 +03002042 }
2043
David Weinehall52a05c32016-08-22 13:32:44 +03002044 pci_disable_device(pdev);
Imre Deakab3be732015-03-02 13:04:41 +02002045 /*
Imre Deak54875572015-06-30 17:06:47 +03002046 * During hibernation on some platforms the BIOS may try to access
Imre Deakab3be732015-03-02 13:04:41 +02002047 * the device even though it's already in D3 and hang the machine. So
2048 * leave the device in D0 on those platforms and hope the BIOS will
Imre Deak54875572015-06-30 17:06:47 +03002049 * power down the device properly. The issue was seen on multiple old
2050 * GENs with different BIOS vendors, so having an explicit blacklist
2051	 * is impractical; apply the workaround on everything pre-GEN6. The
2052 * platforms where the issue was seen:
2053 * Lenovo Thinkpad X301, X61s, X60, T60, X41
2054 * Fujitsu FSC S7110
2055 * Acer Aspire 1830T
Imre Deakab3be732015-03-02 13:04:41 +02002056 */
Tvrtko Ursulin514e1d62016-11-04 14:42:48 +00002057 if (!(hibernation && INTEL_GEN(dev_priv) < 6))
David Weinehall52a05c32016-08-22 13:32:44 +03002058 pci_set_power_state(pdev, PCI_D3hot);
Imre Deakc3c09c92014-10-23 19:23:15 +03002059
Imre Deak1f814da2015-12-16 02:52:19 +02002060out:
2061 enable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonbd780f32019-01-14 14:21:09 +00002062 if (!dev_priv->uncore.user_forcewake.count)
2063 intel_runtime_pm_cleanup(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02002064
2065 return ret;
Imre Deakc3c09c92014-10-23 19:23:15 +03002066}
2067
Matthew Aulda9a251c2016-12-02 10:24:11 +00002068static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002069{
2070 int error;
2071
Chris Wilsonded8b072016-07-05 10:40:22 +01002072 if (!dev) {
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002073 DRM_ERROR("dev: %p\n", dev);
Keith Packard1ae8c0a2009-06-28 15:42:17 -07002074 DRM_ERROR("DRM not initialized, aborting suspend.\n");
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10002075 return -ENODEV;
2076 }
2077
Imre Deak0b14cbd2014-09-10 18:16:55 +03002078 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
2079 state.event != PM_EVENT_FREEZE))
2080 return -EINVAL;
Dave Airlie5bcf7192010-12-07 09:20:40 +10002081
2082 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2083 return 0;
Chris Wilson6eecba32010-09-08 09:45:11 +01002084
Imre Deak5e365c32014-10-23 19:23:25 +03002085 error = i915_drm_suspend(dev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002086 if (error)
2087 return error;
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10002088
Imre Deakab3be732015-03-02 13:04:41 +02002089 return i915_drm_suspend_late(dev, false);
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10002090}
2091
Imre Deak5e365c32014-10-23 19:23:25 +03002092static int i915_drm_resume(struct drm_device *dev)
Jesse Barnesba8bbcf2007-11-22 14:14:14 +10002093{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002094 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03002095 int ret;
Matthew Garrett8ee1c3d2008-08-05 19:37:25 +01002096
Imre Deak1f814da2015-12-16 02:52:19 +02002097 disable_rpm_wakeref_asserts(dev_priv);
Chris Wilsonabc80ab2016-08-24 10:27:01 +01002098 intel_sanitize_gt_powersave(dev_priv);
Imre Deak1f814da2015-12-16 02:52:19 +02002099
Chris Wilson12887862018-06-14 10:40:59 +01002100 i915_gem_sanitize(dev_priv);
2101
Chris Wilson97d6d7a2016-08-04 07:52:22 +01002102 ret = i915_ggtt_enable_hw(dev_priv);
Ville Syrjäläac840ae2016-05-06 21:35:55 +03002103 if (ret)
2104 DRM_ERROR("failed to re-enable GGTT\n");
2105
Imre Deakf74ed082016-04-18 14:48:21 +03002106 intel_csr_ucode_resume(dev_priv);
2107
Tvrtko Ursulinaf6dc742016-12-01 14:16:44 +00002108 i915_restore_state(dev_priv);
Imre Deak8090ba82016-08-10 14:07:33 +03002109 intel_pps_unlock_regs_wa(dev_priv);
Rafael J. Wysocki61caf872010-02-18 23:06:27 +01002110
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02002111 intel_init_pch_refclk(dev_priv);
Chris Wilson1833b132012-05-09 11:56:28 +01002112
Peter Antoine364aece2015-05-11 08:50:45 +01002113 /*
2114 * Interrupts have to be enabled before any batches are run. If not the
2115 * GPU will hang. i915_gem_init_hw() will initiate batches to
2116 * update/restore the context.
2117 *
Imre Deak908764f2016-11-29 21:40:29 +02002118 * drm_mode_config_reset() needs AUX interrupts.
2119 *
Peter Antoine364aece2015-05-11 08:50:45 +01002120 * Modeset enabling in intel_modeset_init_hw() also needs working
2121 * interrupts.
2122 */
2123 intel_runtime_pm_enable_interrupts(dev_priv);
2124
Imre Deak908764f2016-11-29 21:40:29 +02002125 drm_mode_config_reset(dev);
2126
Chris Wilson37cd3302017-11-12 11:27:38 +00002127 i915_gem_resume(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01002128
Daniel Vetterd5818932015-02-23 12:03:26 +01002129 intel_modeset_init_hw(dev);
Ville Syrjälä675f7ff2017-11-16 18:02:15 +02002130 intel_init_clock_gating(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01002131
2132 spin_lock_irq(&dev_priv->irq_lock);
2133 if (dev_priv->display.hpd_irq_setup)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002134 dev_priv->display.hpd_irq_setup(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01002135 spin_unlock_irq(&dev_priv->irq_lock);
2136
Ville Syrjälä1a4313d2018-07-05 19:43:52 +03002137 intel_dp_mst_resume(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01002138
Lyudea16b7652016-03-11 10:57:01 -05002139 intel_display_resume(dev);
2140
Lyudee0b70062016-11-01 21:06:30 -04002141 drm_kms_helper_poll_enable(dev);
2142
Daniel Vetterd5818932015-02-23 12:03:26 +01002143 /*
2144 * ... but also need to make sure that hotplug processing
2145 * doesn't cause havoc. Like in the driver load code we don't
Gwan-gyeong Munc444ad72018-08-03 19:41:50 +03002146 * bother with the tiny race here where we might lose hotplug
Daniel Vetterd5818932015-02-23 12:03:26 +01002147 * notifications.
2148	 */
2149 intel_hpd_init(dev_priv);
Jesse Barnes1daed3f2011-01-05 12:01:25 -08002150
Chris Wilsona950adc2018-10-30 11:05:54 +00002151 intel_opregion_resume(dev_priv);
Chris Wilson44834a62010-08-19 16:09:23 +01002152
Chris Wilson82e3b8c2014-08-13 13:09:46 +01002153 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
Jesse Barnes073f34d2012-11-02 11:13:59 -07002154
Imre Deak2cd9a682018-08-16 15:37:57 +03002155 intel_power_domains_enable(dev_priv);
2156
Imre Deak1f814da2015-12-16 02:52:19 +02002157 enable_rpm_wakeref_asserts(dev_priv);
2158
Chris Wilson074c6ad2014-04-09 09:19:43 +01002159 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002160}
2161
Imre Deak5e365c32014-10-23 19:23:25 +03002162static int i915_drm_resume_early(struct drm_device *dev)
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002163{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002164 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehall52a05c32016-08-22 13:32:44 +03002165 struct pci_dev *pdev = dev_priv->drm.pdev;
Imre Deak44410cd2016-04-18 14:45:54 +03002166 int ret;
Imre Deak36d61e62014-10-23 19:23:24 +03002167
Imre Deak76c4b252014-04-01 19:55:22 +03002168 /*
2169 * We have a resume ordering issue with the snd-hda driver also
2170	 * requiring our device to be powered up. Due to the lack of a
2171 * parent/child relationship we currently solve this with an early
2172 * resume hook.
2173 *
2174 * FIXME: This should be solved with a special hdmi sink device or
2175 * similar so that power domains can be employed.
2176 */
Imre Deak44410cd2016-04-18 14:45:54 +03002177
2178 /*
2179 * Note that we need to set the power state explicitly, since we
2180 * powered off the device during freeze and the PCI core won't power
2181 * it back up for us during thaw. Powering off the device during
2182 * freeze is not a hard requirement though, and during the
2183 * suspend/resume phases the PCI core makes sure we get here with the
2184 * device powered on. So in case we change our freeze logic and keep
2185 * the device powered we can also remove the following set power state
2186 * call.
2187 */
David Weinehall52a05c32016-08-22 13:32:44 +03002188 ret = pci_set_power_state(pdev, PCI_D0);
Imre Deak44410cd2016-04-18 14:45:54 +03002189 if (ret) {
2190 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
Imre Deak2cd9a682018-08-16 15:37:57 +03002191 return ret;
Imre Deak44410cd2016-04-18 14:45:54 +03002192 }
2193
2194 /*
2195 * Note that pci_enable_device() first enables any parent bridge
2196 * device and only then sets the power state for this device. The
2197 * bridge enabling is a nop though, since bridge devices are resumed
2198 * first. The order of enabling power and enabling the device is
2199 * imposed by the PCI core as described above, so here we preserve the
2200 * same order for the freeze/thaw phases.
2201 *
2202 * TODO: eventually we should remove pci_disable_device() /
2203	 * pci_enable_device() from suspend/resume. Due to how they
2204 * depend on the device enable refcount we can't anyway depend on them
2205 * disabling/enabling the device.
2206 */
Imre Deak2cd9a682018-08-16 15:37:57 +03002207 if (pci_enable_device(pdev))
2208 return -EIO;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002209
David Weinehall52a05c32016-08-22 13:32:44 +03002210 pci_set_master(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002211
Imre Deak1f814da2015-12-16 02:52:19 +02002212 disable_rpm_wakeref_asserts(dev_priv);
2213
Wayne Boyer666a4532015-12-09 12:29:35 -08002214 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Paulo Zanoni1a5df182014-10-27 17:54:32 -02002215 ret = vlv_resume_prepare(dev_priv, false);
Imre Deak36d61e62014-10-23 19:23:24 +03002216 if (ret)
Damien Lespiauff0b1872015-05-20 14:45:15 +01002217 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
2218 ret);
Imre Deak36d61e62014-10-23 19:23:24 +03002219
Hans de Goede68f60942017-02-10 11:28:01 +01002220 intel_uncore_resume_early(dev_priv);
Paulo Zanoniefee8332014-10-27 17:54:33 -02002221
Animesh Manna3e689282018-10-29 15:14:10 -07002222 if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
Imre Deak0f906032018-03-22 16:36:42 +02002223 gen9_sanitize_dc_state(dev_priv);
Imre Deak507e1262016-04-20 20:27:54 +03002224 bxt_disable_dc9(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03002225 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Damien Lespiaua9a6b732015-05-20 14:45:14 +01002226 hsw_disable_pc8(dev_priv);
Imre Deakda2f41d2016-04-20 20:27:56 +03002227 }
Paulo Zanoniefee8332014-10-27 17:54:33 -02002228
Chris Wilsondc979972016-05-10 14:10:04 +01002229 intel_uncore_sanitize(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02002230
Imre Deak2cd9a682018-08-16 15:37:57 +03002231 intel_power_domains_resume(dev_priv);
Imre Deakbc872292015-11-18 17:32:30 +02002232
Chris Wilson55277e12019-01-03 11:21:04 +00002233 intel_engines_sanitize(dev_priv, true);
Chris Wilson4fdd5b42018-06-16 21:25:34 +01002234
Imre Deak6e35e8a2016-04-18 10:04:19 +03002235 enable_rpm_wakeref_asserts(dev_priv);
2236
Imre Deak36d61e62014-10-23 19:23:24 +03002237 return ret;
Imre Deak76c4b252014-04-01 19:55:22 +03002238}
2239
Tvrtko Ursulin7f26cb82016-12-01 14:16:41 +00002240static int i915_resume_switcheroo(struct drm_device *dev)
Imre Deak76c4b252014-04-01 19:55:22 +03002241{
Imre Deak50a00722014-10-23 19:23:17 +03002242 int ret;
Imre Deak76c4b252014-04-01 19:55:22 +03002243
Imre Deak097dd832014-10-23 19:23:19 +03002244 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2245 return 0;
2246
Imre Deak5e365c32014-10-23 19:23:25 +03002247 ret = i915_drm_resume_early(dev);
Imre Deak50a00722014-10-23 19:23:17 +03002248 if (ret)
2249 return ret;
2250
Imre Deak5a175142014-10-23 19:23:18 +03002251 return i915_drm_resume(dev);
2252}
2253
Chris Wilson73b66f82018-05-25 10:26:29 +01002254static int i915_pm_prepare(struct device *kdev)
2255{
2256 struct pci_dev *pdev = to_pci_dev(kdev);
2257 struct drm_device *dev = pci_get_drvdata(pdev);
2258
2259 if (!dev) {
2260 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
2261 return -ENODEV;
2262 }
2263
2264 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2265 return 0;
2266
2267 return i915_drm_prepare(dev);
2268}
2269
David Weinehallc49d13e2016-08-22 13:32:42 +03002270static int i915_pm_suspend(struct device *kdev)
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002271{
David Weinehallc49d13e2016-08-22 13:32:42 +03002272 struct pci_dev *pdev = to_pci_dev(kdev);
2273 struct drm_device *dev = pci_get_drvdata(pdev);
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002274
David Weinehallc49d13e2016-08-22 13:32:42 +03002275 if (!dev) {
2276 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002277 return -ENODEV;
2278 }
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002279
David Weinehallc49d13e2016-08-22 13:32:42 +03002280 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Dave Airlie5bcf7192010-12-07 09:20:40 +10002281 return 0;
2282
David Weinehallc49d13e2016-08-22 13:32:42 +03002283 return i915_drm_suspend(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002284}
2285
David Weinehallc49d13e2016-08-22 13:32:42 +03002286static int i915_pm_suspend_late(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002287{
David Weinehallc49d13e2016-08-22 13:32:42 +03002288 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002289
2290 /*
Damien Lespiauc965d9952015-05-18 19:53:48 +01002291 * We have a suspend ordering issue with the snd-hda driver also
Imre Deak76c4b252014-04-01 19:55:22 +03002292	 * requiring our device to be powered up. Due to the lack of a
2293	 * parent/child relationship we currently solve this with a late
2294 * suspend hook.
2295 *
2296 * FIXME: This should be solved with a special hdmi sink device or
2297 * similar so that power domains can be employed.
2298 */
David Weinehallc49d13e2016-08-22 13:32:42 +03002299 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak76c4b252014-04-01 19:55:22 +03002300 return 0;
Kristian Høgsberg112b7152009-01-04 16:55:33 -05002301
David Weinehallc49d13e2016-08-22 13:32:42 +03002302 return i915_drm_suspend_late(dev, false);
Imre Deakab3be732015-03-02 13:04:41 +02002303}
2304
David Weinehallc49d13e2016-08-22 13:32:42 +03002305static int i915_pm_poweroff_late(struct device *kdev)
Imre Deakab3be732015-03-02 13:04:41 +02002306{
David Weinehallc49d13e2016-08-22 13:32:42 +03002307 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deakab3be732015-03-02 13:04:41 +02002308
David Weinehallc49d13e2016-08-22 13:32:42 +03002309 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deakab3be732015-03-02 13:04:41 +02002310 return 0;
2311
David Weinehallc49d13e2016-08-22 13:32:42 +03002312 return i915_drm_suspend_late(dev, true);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002313}
2314
David Weinehallc49d13e2016-08-22 13:32:42 +03002315static int i915_pm_resume_early(struct device *kdev)
Imre Deak76c4b252014-04-01 19:55:22 +03002316{
David Weinehallc49d13e2016-08-22 13:32:42 +03002317 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Imre Deak76c4b252014-04-01 19:55:22 +03002318
David Weinehallc49d13e2016-08-22 13:32:42 +03002319 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002320 return 0;
2321
David Weinehallc49d13e2016-08-22 13:32:42 +03002322 return i915_drm_resume_early(dev);
Imre Deak76c4b252014-04-01 19:55:22 +03002323}
2324
David Weinehallc49d13e2016-08-22 13:32:42 +03002325static int i915_pm_resume(struct device *kdev)
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002326{
David Weinehallc49d13e2016-08-22 13:32:42 +03002327 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01002328
David Weinehallc49d13e2016-08-22 13:32:42 +03002329 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Imre Deak097dd832014-10-23 19:23:19 +03002330 return 0;
2331
David Weinehallc49d13e2016-08-22 13:32:42 +03002332 return i915_drm_resume(dev);
Zhenyu Wangcbda12d2009-12-16 13:36:10 +08002333}
2334
Chris Wilson1f19ac22016-05-14 07:26:32 +01002335/* freeze: before creating the hibernation_image */
David Weinehallc49d13e2016-08-22 13:32:42 +03002336static int i915_pm_freeze(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002337{
Imre Deakdd9f31c2017-08-16 17:46:07 +03002338 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Chris Wilson6a800ea2016-09-21 14:51:07 +01002339 int ret;
2340
Imre Deakdd9f31c2017-08-16 17:46:07 +03002341 if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2342 ret = i915_drm_suspend(dev);
2343 if (ret)
2344 return ret;
2345 }
Chris Wilson6a800ea2016-09-21 14:51:07 +01002346
2347 ret = i915_gem_freeze(kdev_to_i915(kdev));
2348 if (ret)
2349 return ret;
2350
2351 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002352}
2353
David Weinehallc49d13e2016-08-22 13:32:42 +03002354static int i915_pm_freeze_late(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002355{
Imre Deakdd9f31c2017-08-16 17:46:07 +03002356 struct drm_device *dev = &kdev_to_i915(kdev)->drm;
Chris Wilson461fb992016-05-14 07:26:33 +01002357 int ret;
2358
Imre Deakdd9f31c2017-08-16 17:46:07 +03002359 if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
2360 ret = i915_drm_suspend_late(dev, true);
2361 if (ret)
2362 return ret;
2363 }
Chris Wilson461fb992016-05-14 07:26:33 +01002364
David Weinehallc49d13e2016-08-22 13:32:42 +03002365 ret = i915_gem_freeze_late(kdev_to_i915(kdev));
Chris Wilson461fb992016-05-14 07:26:33 +01002366 if (ret)
2367 return ret;
2368
2369 return 0;
Chris Wilson1f19ac22016-05-14 07:26:32 +01002370}
2371
2372/* thaw: called after creating the hibernation image, but before turning off. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002373static int i915_pm_thaw_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002374{
David Weinehallc49d13e2016-08-22 13:32:42 +03002375 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002376}
2377
David Weinehallc49d13e2016-08-22 13:32:42 +03002378static int i915_pm_thaw(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002379{
David Weinehallc49d13e2016-08-22 13:32:42 +03002380 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002381}
2382
2383/* restore: called after loading the hibernation image. */
David Weinehallc49d13e2016-08-22 13:32:42 +03002384static int i915_pm_restore_early(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002385{
David Weinehallc49d13e2016-08-22 13:32:42 +03002386 return i915_pm_resume_early(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002387}
2388
David Weinehallc49d13e2016-08-22 13:32:42 +03002389static int i915_pm_restore(struct device *kdev)
Chris Wilson1f19ac22016-05-14 07:26:32 +01002390{
David Weinehallc49d13e2016-08-22 13:32:42 +03002391 return i915_pm_resume(kdev);
Chris Wilson1f19ac22016-05-14 07:26:32 +01002392}
2393
Imre Deakddeea5b2014-05-05 15:19:56 +03002394/*
2395 * Save all Gunit registers that may be lost after a D3 and a subsequent
2396 * S0i[R123] transition. The list of registers needing a save/restore is
2397	 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
2398 * registers in the following way:
2399 * - Driver: saved/restored by the driver
2400 * - Punit : saved/restored by the Punit firmware
2401 * - No, w/o marking: no need to save/restore, since the register is R/O or
2402	 * used internally by the HW in a way that doesn't depend on
2403	 * keeping the content across a suspend/resume.
2404 * - Debug : used for debugging
2405 *
2406 * We save/restore all registers marked with 'Driver', with the following
2407 * exceptions:
2408 * - Registers out of use, including also registers marked with 'Debug'.
2409 * These have no effect on the driver's operation, so we don't save/restore
2410 * them to reduce the overhead.
2411 * - Registers that are fully setup by an initialization function called from
2412 * the resume path. For example many clock gating and RPS/RC6 registers.
2413 * - Registers that provide the right functionality with their reset defaults.
2414 *
2415	 * TODO: Except for registers that, based on the above 3 criteria, can be safely
2416 * ignored, we save/restore all others, practically treating the HW context as
2417 * a black-box for the driver. Further investigation is needed to reduce the
2418 * saved/restored registers even further, by following the same 3 criteria.
2419 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow-wake and GFX force-clock bits: they are not
	 * restored here, since the caller uses them to control the s0ix
	 * suspend/resume sequence.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}
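
/*
 * Usage sketch (illustrative only): callers pass the PW_STATUS bits they
 * care about and the value they expect those bits to reach, e.g. waiting
 * for the GT allow-wake acknowledgment:
 *
 *	err = vlv_wait_for_pw_status(dev_priv, VLV_GTLC_ALLOWWAKEACK,
 *				     VLV_GTLC_ALLOWWAKEACK);
 *
 * vlv_allow_gt_wake() below uses exactly this pattern.
 */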

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}
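
/*
 * Usage sketch (illustrative only): the S0ix helpers below bracket the
 * Gunit state save/restore with a forced GFX clock, roughly:
 *
 *	err = vlv_force_gfx_clock(dev_priv, true);
 *	... save or restore the Gunit s0ix state ...
 *	err = vlv_force_gfx_clock(dev_priv, false);
 *
 * See vlv_suspend_complete() and vlv_resume_prepare() for the real thing.
 */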

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

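/*
 * Wait for the render and media power wells to reach the state implied by
 * wait_for_on. A timeout is only reported as a debug message; see the
 * comment in vlv_suspend_complete() for why it is not treated as fatal.
 */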
static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

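/*
 * Enter S0ix on VLV/CHV. A sketch of the sequence implemented below (the
 * Gunit save step is skipped on CHV):
 *
 *	vlv_force_gfx_clock(dev_priv, true);
 *	vlv_allow_gt_wake(dev_priv, false);
 *	vlv_save_gunit_s0ix_state(dev_priv);
 *	vlv_force_gfx_clock(dev_priv, false);
 *
 * On any failure, GT waking is re-allowed and the clock forcing dropped
 * before returning the error.
 */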
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the GT power well status flags checked here as debug
	 * only, so don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

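/*
 * Undo the S0ix entry sequence: force the GFX clock on, restore the Gunit
 * state (skipped on CHV), re-allow GT wake and release the clock again.
 * Errors are not unwound here; the first error code is returned so the
 * caller can leave RPM disabled.
 */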
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}

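/*
 * Runtime (D3/S0ix) suspend: quiesce GEM and the GuC, mask interrupts,
 * put the display and GT into their platform-specific low-power states
 * and mark the device suspended. On failure the sequence is unwound and
 * the returned error disables further runtime suspend attempts.
 */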
static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_uc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(dev_priv);

	ret = 0;
	if (INTEL_GEN(dev_priv) >= 11) {
		icl_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(dev_priv);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_uc_resume(dev_priv);

		i915_gem_init_swizzling(dev_priv);
		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	enable_rpm_wakeref_asserts(dev_priv);
	intel_runtime_pm_cleanup(dev_priv);

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->runtime_pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

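/*
 * Runtime resume: the mirror of intel_runtime_suspend(). Bring the display
 * and GT back up, restore swizzling and fences and re-enable interrupts.
 * Errors are deliberately not rolled back; see the comment in the body.
 */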
static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->runtime_pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	if (INTEL_GEN(dev_priv) >= 11) {
		bxt_disable_dc9(dev_priv);
		icl_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload) {
			if (dev_priv->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(dev_priv);
			else if (dev_priv->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(dev_priv);
		}
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	intel_uncore_runtime_resume(dev_priv);

	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_uc_resume(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
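
/*
 * Usage sketch (illustrative only; the actual hookup lives in i915_pci.c):
 * these ops take effect once the PCI driver points at them, roughly:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		...
 *		.driver.pm = &i915_pm_ops,
 *	};
 */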

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif