/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_dmc.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "vlv_suspend.h"

static const struct drm_driver driver;

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
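	/*
	 * Worked example with assumed (hypothetical) register values:
	 * temp_hi == 0x1 and temp_lo == 0xfed10000 combine to
	 * mchbar_addr == 0x1fed10000; on pre-gen4 parts only the low
	 * dword exists and temp_hi stays 0.
	 */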

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; flag in mchbar_need_disable if it must be disabled again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
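	/*
	 * (For reference: an ordered workqueue executes at most one work
	 * item at a time, in queueing order -- the single-instance
	 * behaviour the comment above relies on.)
	 */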
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_GT_STEP(dev_priv, 0, STEP_A0);
	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			"It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(&i915->gt, ALL_ENGINES);
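	/*
	 * (That is, we scrub via reset only when the reset cannot take out
	 * the display; platforms where a GPU reset clobbers display state
	 * skip this early scrub.)
	 */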
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is "SW-only" state, that is, state not
 * requiring device access or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);
	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 * i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	ret = intel_gt_init_mmio(&dev_priv->gt);
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks. The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs. Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;
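	/*
	 * For reference, DMA_BIT_MASK(n) is (1ULL << n) - 1, so the overrides
	 * above come out as DMA_BIT_MASK(30) == 0x3fffffff (1 GiB addressable)
	 * and DMA_BIT_MASK(32) == 0xffffffff (4 GiB).
	 */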

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);

	ret = intel_gt_probe_lmem(&dev_priv->gt);
	if (ret)
		goto err_mem_regions;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and MSI was defeatured there. MSI interrupts
	 * seem to get lost on g4x as well, and interrupt delivery seems to
	 * stay properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irqs on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	intel_pcode_init(dev_priv);

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	intel_gt_driver_register(&dev_priv->gt);

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_gt_driver_unregister(&dev_priv->gt);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		intel_gt_info_print(&dev_priv->gt.info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;
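	/*
	 * (Clearing DRIVER_ATOMIC hides the atomic modeset/pageflip uAPI
	 * from userspace; the i915.nuclear_pageflip modparam can force it
	 * back on for these older platforms.)
	 */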

	/*
	 * Check if we support fake LMEM -- for now we only unleash this for
	 * the live selftests (test-and-exit).
	 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
		if (GRAPHICS_VER(i915) >= 9 && i915_selftest.live < 0 &&
		    i915->params.fake_lmem_start) {
			mkwrite_device_info(i915)->memory_regions =
				REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
			GEM_BUG_ON(!HAS_LMEM(i915));
		}
	}
#endif

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(dev);

	if (HAS_DISPLAY(i915))
		vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	i915_gem_suspend(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_ucode_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 *
	 * TODO:
	 * - unify the pci_driver::shutdown sequence here with the
	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
	 * - unify the driver remove and system/runtime suspend sequences with
	 *   the above unified shutdown/poweroff sequence.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
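/*
 * Note: the ACPI sleep states are numbered S0..S5, so the check above treats
 * any target shallower than S3 (suspend-to-idle/S0ix, S1 standby) as "idle";
 * i915_drm_suspend() below then picks PCI_D1 rather than PCI_D3cold as the
 * opregion target state.
 */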

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after this point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}
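/*
 * i915_drm_prepare() is assumed to be wired up as the dev_pm_ops .prepare
 * callback (via i915_pm_prepare() later in this file), i.e. it runs before
 * the ->suspend phase that enters i915_drm_suspend() below.
 */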

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

Paulo Zanonic67a4702013-08-19 13:18:09 -03001097 /* We do a lot of poking in a lot of registers, make sure they work
1098 * properly. */
Imre Deak2cd9a682018-08-16 15:37:57 +03001099 intel_power_domains_disable(dev_priv);
José Roberto de Souza5df7bd12021-04-08 13:31:50 -07001100 if (HAS_DISPLAY(dev_priv))
1101 drm_kms_helper_poll_disable(dev);
Dave Airlie5bcf7192010-12-07 09:20:40 +10001102
David Weinehall52a05c32016-08-22 13:32:44 +03001103 pci_save_state(pdev);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001104
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02001105 intel_display_suspend(dev);
Daniel Vetterd5818932015-02-23 12:03:26 +01001106
Ville Syrjälä1a4313d2018-07-05 19:43:52 +03001107 intel_dp_mst_suspend(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001108
1109 intel_runtime_pm_disable_interrupts(dev_priv);
1110 intel_hpd_cancel_work(dev_priv);
1111
1112 intel_suspend_encoders(dev_priv);
1113
Ville Syrjälä712bf362016-10-31 22:37:23 +02001114 intel_suspend_hw(dev_priv);
Daniel Vetterd5818932015-02-23 12:03:26 +01001115
Chris Wilsone9862092020-01-30 18:17:09 +00001116 i915_ggtt_suspend(&dev_priv->ggtt);
Ben Widawsky828c7902013-10-16 09:21:30 -07001117
Ville Syrjälä0f8d2a22020-10-05 20:14:41 +03001118 i915_save_display(dev_priv);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001119
Imre Deakbc872292015-11-18 17:32:30 +02001120 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
Chris Wilsona950adc2018-10-30 11:05:54 +00001121 intel_opregion_suspend(dev_priv, opregion_target_state);
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001122
Chris Wilson82e3b8c2014-08-13 13:09:46 +01001123 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
Dave Airlie3fa016a2012-03-28 10:48:49 +01001124
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001125 dev_priv->suspend_count++;
1126
Anusha Srivatsa74ff1502021-05-18 14:34:43 -07001127 intel_dmc_ucode_suspend(dev_priv);
Imre Deakf514c2d2015-10-28 23:59:06 +02001128
Daniele Ceraolo Spurio91026502019-06-13 16:21:51 -07001129 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
Imre Deak1f814da2015-12-16 02:52:19 +02001130
Chris Wilson73b66f82018-05-25 10:26:29 +01001131 return 0;
Rafael J. Wysocki84b79f82010-02-07 21:48:24 +01001132}
1133
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(&dev_priv->ggtt);

	intel_dmc_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not,
	 * the GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

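/*
 * Both switcheroo entry points above are driven by vga_switcheroo when the
 * active GPU on a dual-GPU system changes. A rough sketch of the caller
 * (the real handler lives in i915_switcheroo.c; details abbreviated):
 *
 *	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 *
 *	if (state == VGA_SWITCHEROO_ON) {
 *		pci_set_power_state(pdev, PCI_D0);
 *		i915_resume_switcheroo(i915);
 *		i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
 *	} else {
 *		i915_suspend_switcheroo(i915, pmm);
 *		i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
 *	}
 */
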
static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");

	return ret;
}

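/*
 * Neither runtime callback above runs until runtime PM has been armed for
 * the device. A hedged sketch of the usual arming sequence (in i915 this
 * is done in intel_runtime_pm_enable(); the delay value here is purely
 * illustrative):
 *
 *	pm_runtime_set_autosuspend_delay(kdev, 10000);
 *	pm_runtime_use_autosuspend(kdev);
 *	pm_runtime_mark_last_busy(kdev);
 *	pm_runtime_put_autosuspend(kdev);
 *
 * Once the autosuspend delay expires with no wakerefs held, the PM core
 * invokes intel_runtime_suspend() above.
 */
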
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

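/*
 * The table above only takes effect once it is attached to the device's
 * driver. A minimal sketch of the PCI wiring, assuming the probe/remove/
 * shutdown hooks and id table defined in i915_pci.c:
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		.id_table = pciidlist,
 *		.probe = i915_pci_probe,
 *		.remove = i915_pci_remove,
 *		.shutdown = i915_pci_shutdown,
 *		.driver.pm = &i915_pm_ops,
 *	};
 */
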
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

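/*
 * Userspace reaches the table above through the ->unlocked_ioctl hook in
 * i915_driver_fops, which drm_ioctl() demultiplexes into these handlers.
 * A hedged userspace sketch (not part of this file) querying one
 * parameter via libdrm:
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	int chipset_id = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &chipset_id,
 *	};
 *
 *	if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("device id: 0x%x\n", chipset_id);
 */
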
static const struct drm_driver driver = {
	/*
	 * Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};