/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
#include "intel_pm.h"

static struct drm_driver driver;

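/*
 * The bridge device looked up here is the host bridge at 00:00.0; its
 * config space carries chipset-level state such as the MCHBAR setup below.
 */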
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
        int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

        dev_priv->bridge_dev =
                pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
        int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_GEN(dev_priv) >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_GEN(dev_priv) >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}

/* Set up MCHBAR if possible; sets mchbar_need_disable if we must disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
        int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev_priv))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
        int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                        u32 deven_val;

                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
                                              &deven_val);
                        deven_val &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                               deven_val);
                } else {
                        u32 mchbar_val;

                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
                                              &mchbar_val);
                        mchbar_val &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
                                               mchbar_val);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_i915_private *dev_priv = cookie;

        intel_modeset_vga_set_state(dev_priv, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_i915_private *i915);
static int i915_suspend_switcheroo(struct drm_i915_private *i915,
                                   pm_message_t state);

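/*
 * vga_switcheroo callback: on hybrid-graphics machines this suspends or
 * resumes the device as the mux switches between the integrated and the
 * discrete GPU.
 */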
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_i915_private *i915 = pdev_to_i915(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (!i915) {
                dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
                return;
        }

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(pdev, PCI_D0);
                i915_resume_switcheroo(i915);
                i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_switcheroo(i915, pmm);
                i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_i915_private *i915 = pdev_to_i915(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return i915 && i915->drm.open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

static int i915_driver_modeset_probe(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;

        if (i915_inject_probe_failure(dev_priv))
                return -ENODEV;

        if (HAS_DISPLAY(dev_priv)) {
                ret = drm_vblank_init(&dev_priv->drm,
                                      INTEL_INFO(dev_priv)->num_pipes);
                if (ret)
                        goto out;
        }

        intel_bios_init(dev_priv);

        /* If we have more than one VGA card, then we need to arbitrate
         * access to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        /* must happen before intel_power_domains_init_hw() on VLV/CHV */
        intel_update_rawclk(dev_priv);

        intel_power_domains_init_hw(dev_priv, false);

        intel_csr_ucode_init(dev_priv);

        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_csr;

        intel_gmbus_setup(dev_priv);

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        ret = intel_modeset_init(dev);
        if (ret)
                goto cleanup_irq;

        ret = i915_gem_init(dev_priv);
        if (ret)
                goto cleanup_modeset;

        intel_overlay_setup(dev_priv);

        if (!HAS_DISPLAY(dev_priv))
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        intel_init_ipc(dev_priv);

        return 0;

cleanup_gem:
        i915_gem_suspend(dev_priv);
        i915_gem_driver_remove(dev_priv);
        i915_gem_driver_release(dev_priv);
cleanup_modeset:
        intel_modeset_driver_remove(dev);
cleanup_irq:
        intel_irq_uninstall(dev_priv);
        intel_gmbus_teardown(dev_priv);
cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
        intel_power_domains_driver_remove(dev_priv);
        vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
        vga_client_register(pdev, NULL, NULL, NULL);
out:
        return ret;
}

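/*
 * Evict conflicting firmware framebuffers (e.g. the one inherited from the
 * BIOS/EFI boot display) that overlap the GGTT aperture, so they stop
 * scanning out of memory we are about to manage ourselves.
 */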
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = ggtt->gmadr.start;
        ap->ranges[0].size = ggtt->mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
        /*
         * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
         * CHV x1 PHY (DP/HDMI D)
         * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
         */
        if (IS_CHERRYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
                DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        }
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
        /*
         * The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time. Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL)
                goto out_err;

        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
        if (dev_priv->hotplug.dp_wq == NULL)
                goto out_free_wq;

        return 0;

out_free_wq:
        destroy_workqueue(dev_priv->wq);
out_err:
        DRM_ERROR("Failed to allocate workqueues.\n");

        return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
        bool pre = false;

        pre |= IS_HSW_EARLY_SDV(dev_priv);
        pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
        pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
        pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

        if (pre) {
                DRM_ERROR("This is a pre-production stepping. "
                          "It may not be fully functional.\n");
                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
        }
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        if (i915_inject_probe_failure(dev_priv))
                return -ENODEV;

        intel_device_info_subplatform_init(dev_priv);

        intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
        intel_uncore_init_early(&dev_priv->uncore, dev_priv);

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);

        mutex_init(&dev_priv->sb_lock);
        pm_qos_add_request(&dev_priv->sb_qos,
                           PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

        mutex_init(&dev_priv->av_mutex);
        mutex_init(&dev_priv->wm.wm_mutex);
        mutex_init(&dev_priv->pps_mutex);
        mutex_init(&dev_priv->hdcp_comp_mutex);

        i915_memcpy_init_early(dev_priv);
        intel_runtime_pm_init_early(&dev_priv->runtime_pm);

        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
                return ret;

        intel_wopcm_init_early(&dev_priv->wopcm);

        intel_gt_init_early(&dev_priv->gt, dev_priv);

        ret = i915_gem_init_early(dev_priv);
        if (ret < 0)
                goto err_workqueues;

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev_priv);

        intel_pm_setup(dev_priv);
        intel_init_dpio(dev_priv);
        ret = intel_power_domains_init(dev_priv);
        if (ret < 0)
                goto err_gem;
        intel_irq_init(dev_priv);
        intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
        intel_display_crc_init(dev_priv);

        intel_detect_preproduction_hw(dev_priv);

        return 0;

err_gem:
        i915_gem_cleanup_early(dev_priv);
err_workqueues:
        intel_gt_driver_late_release(&dev_priv->gt);
        i915_workqueues_cleanup(dev_priv);
        return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 * i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
        intel_irq_fini(dev_priv);
        intel_power_domains_cleanup(dev_priv);
        i915_gem_cleanup_early(dev_priv);
        intel_gt_driver_late_release(&dev_priv->gt);
        i915_workqueues_cleanup(dev_priv);

        pm_qos_remove_request(&dev_priv->sb_qos);
        mutex_destroy(&dev_priv->sb_lock);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
        int ret;

        if (i915_inject_probe_failure(dev_priv))
                return -ENODEV;

        if (i915_get_bridge_dev(dev_priv))
                return -EIO;

        ret = intel_uncore_init_mmio(&dev_priv->uncore);
        if (ret < 0)
                goto err_bridge;

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev_priv);

        intel_device_info_init_mmio(dev_priv);

        intel_uncore_prune_mmio_domains(&dev_priv->uncore);

        intel_uc_init_mmio(&dev_priv->gt.uc);

        ret = intel_engines_init_mmio(dev_priv);
        if (ret)
                goto err_uncore;

        i915_gem_init_mmio(dev_priv);

        return 0;

err_uncore:
        intel_teardown_mchbar(dev_priv);
        intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
        pci_dev_put(dev_priv->bridge_dev);

        return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
        intel_engines_cleanup(dev_priv);
        intel_teardown_mchbar(dev_priv);
        intel_uncore_fini_mmio(&dev_priv->uncore);
        pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
        intel_gvt_sanitize_options(dev_priv);
}

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
        static const char * const str[] = {
                DRAM_TYPE_STR(UNKNOWN),
                DRAM_TYPE_STR(DDR3),
                DRAM_TYPE_STR(DDR4),
                DRAM_TYPE_STR(LPDDR3),
                DRAM_TYPE_STR(LPDDR4),
        };

        if (type >= ARRAY_SIZE(str))
                type = INTEL_DRAM_UNKNOWN;

        return str[type];
}

#undef DRAM_TYPE_STR

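/*
 * Each rank spans the full 64-bit channel, so the number of DRAM devices
 * making up a DIMM is ranks * 64 / device-width (e.g. a dual-rank x8 DIMM
 * is built from 16 devices).
 */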
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
        return dimm->ranks * 64 / (dimm->width ?: 1);
}

/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
        return val & SKL_DRAM_SIZE_MASK;
}

static int skl_get_dimm_width(u16 val)
{
        if (skl_get_dimm_size(val) == 0)
                return 0;

        switch (val & SKL_DRAM_WIDTH_MASK) {
        case SKL_DRAM_WIDTH_X8:
        case SKL_DRAM_WIDTH_X16:
        case SKL_DRAM_WIDTH_X32:
                val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
                return 8 << val;
        default:
                MISSING_CASE(val);
                return 0;
        }
}

static int skl_get_dimm_ranks(u16 val)
{
        if (skl_get_dimm_size(val) == 0)
                return 0;

        val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

        return val + 1;
}

/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
        return (val & CNL_DRAM_SIZE_MASK) / 2;
}

static int cnl_get_dimm_width(u16 val)
{
        if (cnl_get_dimm_size(val) == 0)
                return 0;

        switch (val & CNL_DRAM_WIDTH_MASK) {
        case CNL_DRAM_WIDTH_X8:
        case CNL_DRAM_WIDTH_X16:
        case CNL_DRAM_WIDTH_X32:
                val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
                return 8 << val;
        default:
                MISSING_CASE(val);
                return 0;
        }
}

static int cnl_get_dimm_ranks(u16 val)
{
        if (cnl_get_dimm_size(val) == 0)
                return 0;

        val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

        return val + 1;
}

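/*
 * Example: a 16 GB dual-rank x8 DIMM consists of 16 devices of 8 Gb each,
 * whereas a 16 GB single-rank x8 DIMM needs 8 devices of 16 Gb each; only
 * the latter counts as built from 16 Gb devices here.
 */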
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
        /* Convert total GB to Gb per DRAM device */
        return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
                       struct dram_dimm_info *dimm,
                       int channel, char dimm_name, u16 val)
{
        if (INTEL_GEN(dev_priv) >= 10) {
                dimm->size = cnl_get_dimm_size(val);
                dimm->width = cnl_get_dimm_width(val);
                dimm->ranks = cnl_get_dimm_ranks(val);
        } else {
                dimm->size = skl_get_dimm_size(val);
                dimm->width = skl_get_dimm_width(val);
                dimm->ranks = skl_get_dimm_ranks(val);
        }

        DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
                      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
                      yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
                          struct dram_channel_info *ch,
                          int channel, u32 val)
{
        skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
                               channel, 'L', val & 0xffff);
        skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
                               channel, 'S', val >> 16);

        if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
                DRM_DEBUG_KMS("CH%u not populated\n", channel);
                return -EINVAL;
        }

        if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
                ch->ranks = 2;
        else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
                ch->ranks = 2;
        else
                ch->ranks = 1;

        ch->is_16gb_dimm =
                skl_is_16gb_dimm(&ch->dimm_l) ||
                skl_is_16gb_dimm(&ch->dimm_s);

        DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
                      channel, ch->ranks, yesno(ch->is_16gb_dimm));

        return 0;
}

static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
                        const struct dram_channel_info *ch1)
{
        return !memcmp(ch0, ch1, sizeof(*ch0)) &&
                (ch0->dimm_s.size == 0 ||
                 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
        struct dram_info *dram_info = &dev_priv->dram_info;
        struct dram_channel_info ch0 = {}, ch1 = {};
        u32 val;
        int ret;

        val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
        ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
        if (ret == 0)
                dram_info->num_channels++;

        val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
        ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
        if (ret == 0)
                dram_info->num_channels++;

        if (dram_info->num_channels == 0) {
                DRM_INFO("Number of memory channels is zero\n");
                return -EINVAL;
        }

        /*
         * If any channel is single-rank, the worst-case output will be the
         * same as for single-rank memory, so report the configuration as
         * single rank.
         */
        if (ch0.ranks == 1 || ch1.ranks == 1)
                dram_info->ranks = 1;
        else
                dram_info->ranks = max(ch0.ranks, ch1.ranks);

        if (dram_info->ranks == 0) {
                DRM_INFO("couldn't get memory rank information\n");
                return -EINVAL;
        }

        dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

        dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

        DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
                      yesno(dram_info->symmetric_memory));
        return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

        switch (val & SKL_DRAM_DDR_TYPE_MASK) {
        case SKL_DRAM_DDR_TYPE_DDR3:
                return INTEL_DRAM_DDR3;
        case SKL_DRAM_DDR_TYPE_DDR4:
                return INTEL_DRAM_DDR4;
        case SKL_DRAM_DDR_TYPE_LPDDR3:
                return INTEL_DRAM_LPDDR3;
        case SKL_DRAM_DDR_TYPE_LPDDR4:
                return INTEL_DRAM_LPDDR4;
        default:
                MISSING_CASE(val);
                return INTEL_DRAM_UNKNOWN;
        }
}

static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
        struct dram_info *dram_info = &dev_priv->dram_info;
        u32 mem_freq_khz, val;
        int ret;

        dram_info->type = skl_get_dram_type(dev_priv);
        DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

        ret = skl_dram_get_channels_info(dev_priv);
        if (ret)
                return ret;

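        /*
         * Peak bandwidth is channels * transfer rate (kHz) * 8, the 8
         * presumably being the bytes moved per transfer on a 64-bit channel.
         */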
| 870 | val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); |
| 871 | mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * |
| 872 | SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); |
| 873 | |
| 874 | dram_info->bandwidth_kbps = dram_info->num_channels * |
| 875 | mem_freq_khz * 8; |
| 876 | |
| 877 | if (dram_info->bandwidth_kbps == 0) { |
| 878 | DRM_INFO("Couldn't get system memory bandwidth\n"); |
| 879 | return -EINVAL; |
| 880 | } |
| 881 | |
| 882 | dram_info->valid = true; |
| 883 | return 0; |
| 884 | } |
| 885 | |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 886 | /* Returns Gb per DRAM device */ |
| 887 | static int bxt_get_dimm_size(u32 val) |
| 888 | { |
| 889 | switch (val & BXT_DRAM_SIZE_MASK) { |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 890 | case BXT_DRAM_SIZE_4GBIT: |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 891 | return 4; |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 892 | case BXT_DRAM_SIZE_6GBIT: |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 893 | return 6; |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 894 | case BXT_DRAM_SIZE_8GBIT: |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 895 | return 8; |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 896 | case BXT_DRAM_SIZE_12GBIT: |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 897 | return 12; |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 898 | case BXT_DRAM_SIZE_16GBIT: |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 899 | return 16; |
| 900 | default: |
| 901 | MISSING_CASE(val); |
| 902 | return 0; |
| 903 | } |
| 904 | } |
| 905 | |
| 906 | static int bxt_get_dimm_width(u32 val) |
| 907 | { |
| 908 | if (!bxt_get_dimm_size(val)) |
| 909 | return 0; |
| 910 | |
| 911 | val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; |
| 912 | |
| 913 | return 8 << val; |
| 914 | } |
| 915 | |
| 916 | static int bxt_get_dimm_ranks(u32 val) |
| 917 | { |
| 918 | if (!bxt_get_dimm_size(val)) |
| 919 | return 0; |
| 920 | |
| 921 | switch (val & BXT_DRAM_RANK_MASK) { |
| 922 | case BXT_DRAM_RANK_SINGLE: |
| 923 | return 1; |
| 924 | case BXT_DRAM_RANK_DUAL: |
| 925 | return 2; |
| 926 | default: |
| 927 | MISSING_CASE(val); |
| 928 | return 0; |
| 929 | } |
| 930 | } |
| 931 | |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 932 | static enum intel_dram_type bxt_get_dimm_type(u32 val) |
| 933 | { |
| 934 | if (!bxt_get_dimm_size(val)) |
| 935 | return INTEL_DRAM_UNKNOWN; |
| 936 | |
| 937 | switch (val & BXT_DRAM_TYPE_MASK) { |
| 938 | case BXT_DRAM_TYPE_DDR3: |
| 939 | return INTEL_DRAM_DDR3; |
| 940 | case BXT_DRAM_TYPE_LPDDR3: |
| 941 | return INTEL_DRAM_LPDDR3; |
| 942 | case BXT_DRAM_TYPE_DDR4: |
| 943 | return INTEL_DRAM_DDR4; |
| 944 | case BXT_DRAM_TYPE_LPDDR4: |
| 945 | return INTEL_DRAM_LPDDR4; |
| 946 | default: |
| 947 | MISSING_CASE(val); |
| 948 | return INTEL_DRAM_UNKNOWN; |
| 949 | } |
| 950 | } |
| 951 | |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 952 | static void bxt_get_dimm_info(struct dram_dimm_info *dimm, |
| 953 | u32 val) |
| 954 | { |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 955 | dimm->width = bxt_get_dimm_width(val); |
| 956 | dimm->ranks = bxt_get_dimm_ranks(val); |
Ville Syrjälä | 8860343 | 2019-03-06 22:35:44 +0200 | [diff] [blame] | 957 | |
| 958 | /* |
| 959 | * Size in register is Gb per DRAM device. Convert to total |
| 960 | * GB to match the way we report this for non-LP platforms. |
| 961 | */ |
| 962 | dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; |
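	/*
	 * Illustration (assuming intel_dimm_num_devices() returns the
	 * DRAM device count of the DIMM): 8 Gb devices and 8 devices
	 * per DIMM give 8 * 8 / 8 = 8 GB.
	 */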
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 963 | } |
| 964 | |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 965 | static int |
| 966 | bxt_get_dram_info(struct drm_i915_private *dev_priv) |
| 967 | { |
| 968 | struct dram_info *dram_info = &dev_priv->dram_info; |
| 969 | u32 dram_channels; |
| 970 | u32 mem_freq_khz, val; |
| 971 | u8 num_active_channels; |
| 972 | int i; |
| 973 | |
| 974 | val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0); |
| 975 | mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * |
| 976 | BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); |
| 977 | |
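	/* hweight32() is a population count, i.e. the number of set bits. */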
| 978 | dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; |
| 979 | num_active_channels = hweight32(dram_channels); |
| 980 | |
| 981 | /* Each active bit represents a 4-byte-wide channel */
| 982 | dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4); |
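	/*
	 * E.g. (hypothetical numbers) two active channels at
	 * mem_freq_khz = 1600000 give 1600000 * 2 * 4 = 12800000 kBps.
	 */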
| 983 | |
| 984 | if (dram_info->bandwidth_kbps == 0) { |
| 985 | DRM_INFO("Couldn't get system memory bandwidth\n"); |
| 986 | return -EINVAL; |
| 987 | } |
| 988 | |
| 989 | /* |
| 990 | * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
| 991 | */ |
| 992 | for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 993 | struct dram_dimm_info dimm; |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 994 | enum intel_dram_type type; |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 995 | |
| 996 | val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); |
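		/*
		 * An all-ones readback presumably means this DUNIT is
		 * not populated, so skip it.
		 */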
| 997 | if (val == 0xFFFFFFFF) |
| 998 | continue; |
| 999 | |
| 1000 | dram_info->num_channels++; |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1001 | |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 1002 | bxt_get_dimm_info(&dimm, val); |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 1003 | type = bxt_get_dimm_type(val); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1004 | |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 1005 | WARN_ON(type != INTEL_DRAM_UNKNOWN && |
| 1006 | dram_info->type != INTEL_DRAM_UNKNOWN && |
| 1007 | dram_info->type != type); |
| 1008 | |
| 1009 | DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 1010 | i - BXT_D_CR_DRP0_DUNIT_START, |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 1011 | dimm.size, dimm.width, dimm.ranks, |
| 1012 | intel_dram_type_str(type)); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1013 | |
| 1014 | /* |
| 1015 | * If any of the channels is a single-rank channel, the
| 1016 | * worst-case output will be the same as for single-rank
| 1017 | * memory, so treat the memory as single rank.
| 1018 | */ |
Ville Syrjälä | 80373fb | 2019-03-06 22:35:40 +0200 | [diff] [blame] | 1019 | if (dram_info->ranks == 0) |
Ville Syrjälä | a62819a | 2019-03-06 22:35:43 +0200 | [diff] [blame] | 1020 | dram_info->ranks = dimm.ranks; |
| 1021 | else if (dimm.ranks == 1) |
Ville Syrjälä | 80373fb | 2019-03-06 22:35:40 +0200 | [diff] [blame] | 1022 | dram_info->ranks = 1; |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 1023 | |
| 1024 | if (type != INTEL_DRAM_UNKNOWN) |
| 1025 | dram_info->type = type; |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1026 | } |
| 1027 | |
Ville Syrjälä | b185a35 | 2019-03-06 22:35:51 +0200 | [diff] [blame] | 1028 | if (dram_info->type == INTEL_DRAM_UNKNOWN || |
| 1029 | dram_info->ranks == 0) { |
| 1030 | DRM_INFO("couldn't get memory information\n"); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1031 | return -EINVAL; |
| 1032 | } |
| 1033 | |
| 1034 | dram_info->valid = true; |
| 1035 | return 0; |
| 1036 | } |
| 1037 | |
| 1038 | static void |
| 1039 | intel_get_dram_info(struct drm_i915_private *dev_priv) |
| 1040 | { |
| 1041 | struct dram_info *dram_info = &dev_priv->dram_info; |
| 1042 | int ret; |
| 1043 | |
Ville Syrjälä | 5d6f36b | 2018-10-23 21:21:02 +0300 | [diff] [blame] | 1044 | /* |
| 1045 | * Assume 16Gb DIMMs are present until proven otherwise. |
| 1046 | * This is only used for the level 0 watermark latency |
| 1047 | * w/a which does not apply to bxt/glk. |
| 1048 | */ |
| 1049 | dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); |
| 1050 | |
Ville Syrjälä | 331ecde | 2019-03-06 22:35:45 +0200 | [diff] [blame] | 1051 | if (INTEL_GEN(dev_priv) < 9) |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1052 | return; |
| 1053 | |
Ville Syrjälä | 331ecde | 2019-03-06 22:35:45 +0200 | [diff] [blame] | 1054 | if (IS_GEN9_LP(dev_priv)) |
Mahesh Kumar | 5771caf | 2018-08-24 15:02:22 +0530 | [diff] [blame] | 1055 | ret = bxt_get_dram_info(dev_priv); |
Mahesh Kumar | 5771caf | 2018-08-24 15:02:22 +0530 | [diff] [blame] | 1056 | else |
Ville Syrjälä | 6d9c1e9 | 2019-03-06 22:35:50 +0200 | [diff] [blame] | 1057 | ret = skl_get_dram_info(dev_priv); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1058 | if (ret) |
| 1059 | return; |
| 1060 | |
Ville Syrjälä | 30a533e | 2019-03-06 22:35:49 +0200 | [diff] [blame] | 1061 | DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n", |
| 1062 | dram_info->bandwidth_kbps, |
| 1063 | dram_info->num_channels); |
| 1064 | |
Ville Syrjälä | 54561b2 | 2019-03-06 22:35:42 +0200 | [diff] [blame] | 1065 | DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n", |
Ville Syrjälä | 80373fb | 2019-03-06 22:35:40 +0200 | [diff] [blame] | 1066 | dram_info->ranks, yesno(dram_info->is_16gb_dimm)); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1067 | } |
| 1068 | |
Daniele Ceraolo Spurio | f6ac993 | 2019-03-28 10:45:32 -0700 | [diff] [blame] | 1069 | static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap) |
| 1070 | { |
| 1071 | const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; |
| 1072 | const unsigned int sets[4] = { 1, 1, 2, 2 }; |
| 1073 | |
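	/*
	 * Example decode with a hypothetical capability value: 8 banks,
	 * a ways index of 1 (-> 8 ways) and a sets index of 2 (-> 2 sets)
	 * give 8 * 8 * 2 = 128 MB of eDRAM.
	 */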
| 1074 | return EDRAM_NUM_BANKS(cap) * |
| 1075 | ways[EDRAM_WAYS_IDX(cap)] * |
| 1076 | sets[EDRAM_SETS_IDX(cap)]; |
| 1077 | } |
| 1078 | |
| 1079 | static void edram_detect(struct drm_i915_private *dev_priv) |
| 1080 | { |
| 1081 | u32 edram_cap = 0; |
| 1082 | |
| 1083 | if (!(IS_HASWELL(dev_priv) || |
| 1084 | IS_BROADWELL(dev_priv) || |
| 1085 | INTEL_GEN(dev_priv) >= 9)) |
| 1086 | return; |
| 1087 | |
| 1088 | edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP); |
| 1089 | |
| 1090 | /* NB: We can't write IDICR yet because we don't have gt funcs set up */ |
| 1091 | |
| 1092 | if (!(edram_cap & EDRAM_ENABLED)) |
| 1093 | return; |
| 1094 | |
| 1095 | /* |
| 1096 | * The capability bits needed for the size calculation are not there
| 1097 | * on pre-gen9 hardware, so always return 128MB.
| 1098 | */ |
| 1099 | if (INTEL_GEN(dev_priv) < 9) |
| 1100 | dev_priv->edram_size_mb = 128; |
| 1101 | else |
| 1102 | dev_priv->edram_size_mb = |
| 1103 | gen9_edram_size_mb(dev_priv, edram_cap); |
| 1104 | |
Chris Wilson | 88f8065 | 2019-08-15 10:36:04 +0100 | [diff] [blame^] | 1105 | dev_info(dev_priv->drm.dev, |
| 1106 | "Found %uMB of eDRAM\n", dev_priv->edram_size_mb); |
Daniele Ceraolo Spurio | f6ac993 | 2019-03-28 10:45:32 -0700 | [diff] [blame] | 1107 | } |
| 1108 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1109 | /** |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1110 | * i915_driver_hw_probe - setup state requiring device access |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1111 | * @dev_priv: device private |
| 1112 | * |
| 1113 | * Set up state that requires accessing the device, but doesn't require
| 1114 | * exposing the driver via kernel internal or userspace interfaces. |
| 1115 | */ |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1116 | static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1117 | { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1118 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1119 | int ret; |
| 1120 | |
Michal Wajdeczko | 50d8441 | 2019-08-02 18:40:50 +0000 | [diff] [blame] | 1121 | if (i915_inject_probe_failure(dev_priv)) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1122 | return -ENODEV; |
| 1123 | |
Jani Nikula | 1400cc7 | 2018-12-31 16:56:43 +0200 | [diff] [blame] | 1124 | intel_device_info_runtime_init(dev_priv); |
Chris Wilson | 94b4f3b | 2016-07-05 10:40:20 +0100 | [diff] [blame] | 1125 | |
Chris Wilson | 4bdafb9 | 2018-09-26 21:12:22 +0100 | [diff] [blame] | 1126 | if (HAS_PPGTT(dev_priv)) { |
| 1127 | if (intel_vgpu_active(dev_priv) && |
Chris Wilson | ca6ac68 | 2019-03-14 22:38:35 +0000 | [diff] [blame] | 1128 | !intel_vgpu_has_full_ppgtt(dev_priv)) { |
Chris Wilson | 4bdafb9 | 2018-09-26 21:12:22 +0100 | [diff] [blame] | 1129 | i915_report_error(dev_priv, |
| 1130 | "incompatible vGPU found, support for isolated ppGTT required\n"); |
| 1131 | return -ENXIO; |
| 1132 | } |
| 1133 | } |
| 1134 | |
Chris Wilson | 4659289 | 2018-11-30 12:59:54 +0000 | [diff] [blame] | 1135 | if (HAS_EXECLISTS(dev_priv)) { |
| 1136 | /* |
| 1137 | * Older GVT emulation depends upon intercepting CSB mmio, |
| 1138 | * which we no longer use, preferring to use the HWSP cache |
| 1139 | * instead. |
| 1140 | */ |
| 1141 | if (intel_vgpu_active(dev_priv) && |
| 1142 | !intel_vgpu_has_hwsp_emulation(dev_priv)) { |
| 1143 | i915_report_error(dev_priv, |
| 1144 | "old vGPU host found, support for HWSP emulation required\n"); |
| 1145 | return -ENXIO; |
| 1146 | } |
| 1147 | } |
| 1148 | |
Chris Wilson | 94b4f3b | 2016-07-05 10:40:20 +0100 | [diff] [blame] | 1149 | intel_sanitize_options(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1150 | |
Daniele Ceraolo Spurio | f6ac993 | 2019-03-28 10:45:32 -0700 | [diff] [blame] | 1151 | /* needs to be done before ggtt probe */ |
| 1152 | edram_detect(dev_priv); |
| 1153 | |
Lionel Landwerlin | 9f9b279 | 2017-10-27 15:59:31 +0100 | [diff] [blame] | 1154 | i915_perf_init(dev_priv); |
| 1155 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 1156 | ret = i915_ggtt_probe_hw(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1157 | if (ret) |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1158 | goto err_perf; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1159 | |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1160 | /* |
| 1161 | * WARNING: Apparently we must kick fbdev drivers before vgacon, |
| 1162 | * otherwise the vga fbdev driver falls over. |
| 1163 | */ |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1164 | ret = i915_kick_out_firmware_fb(dev_priv); |
| 1165 | if (ret) { |
| 1166 | DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1167 | goto err_ggtt; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1168 | } |
| 1169 | |
Gerd Hoffmann | c6b38fb | 2019-03-01 10:24:59 +0100 | [diff] [blame] | 1170 | ret = vga_remove_vgacon(pdev); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1171 | if (ret) { |
| 1172 | DRM_ERROR("failed to remove conflicting VGA console\n"); |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1173 | goto err_ggtt; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1174 | } |
| 1175 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 1176 | ret = i915_ggtt_init_hw(dev_priv); |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 1177 | if (ret) |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1178 | goto err_ggtt; |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 1179 | |
Tvrtko Ursulin | d8a4424 | 2019-06-21 08:08:06 +0100 | [diff] [blame] | 1180 | intel_gt_init_hw(dev_priv); |
| 1181 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 1182 | ret = i915_ggtt_enable_hw(dev_priv); |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 1183 | if (ret) { |
| 1184 | DRM_ERROR("failed to enable GGTT\n"); |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1185 | goto err_ggtt; |
Chris Wilson | 0088e52 | 2016-08-04 07:52:21 +0100 | [diff] [blame] | 1186 | } |
| 1187 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1188 | pci_set_master(pdev); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1189 | |
| 1190 | /* overlay on gen2 is broken and can't address above 1G */ |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1191 | if (IS_GEN(dev_priv, 2)) { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1192 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1193 | if (ret) { |
| 1194 | DRM_ERROR("failed to set DMA mask\n"); |
| 1195 | |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1196 | goto err_ggtt; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1197 | } |
| 1198 | } |
| 1199 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1200 | /* 965GM sometimes incorrectly writes to the hardware status page (HWS)
| 1201 | * using 32bit addressing, overwriting memory if HWS is located |
| 1202 | * above 4GB. |
| 1203 | * |
| 1204 | * The documentation also mentions an issue with undefined |
| 1205 | * behaviour if any general state is accessed within a page above 4GB, |
| 1206 | * which also needs to be handled carefully. |
| 1207 | */ |
Jani Nikula | c0f8683 | 2016-12-07 12:13:04 +0200 | [diff] [blame] | 1208 | if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1209 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1210 | |
| 1211 | if (ret) { |
| 1212 | DRM_ERROR("failed to set DMA mask\n"); |
| 1213 | |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1214 | goto err_ggtt; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1215 | } |
| 1216 | } |
| 1217 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1218 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, |
| 1219 | PM_QOS_DEFAULT_VALUE); |
| 1220 | |
Daniele Ceraolo Spurio | 19e0a8d | 2019-06-19 18:00:17 -0700 | [diff] [blame] | 1221 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ |
| 1222 | intel_sanitize_gt_powersave(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1223 | |
Tvrtko Ursulin | 25d140f | 2018-12-03 13:33:19 +0000 | [diff] [blame] | 1224 | intel_gt_init_workarounds(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1225 | |
| 1226 | /* On the 945G/GM, the chipset reports the MSI capability on the |
| 1227 | * integrated graphics even though the support isn't actually there |
| 1228 | * according to the published specs. It doesn't appear to function |
| 1229 | * correctly in testing on 945G. |
| 1230 | * This may be a side effect of MSI having been made available for PEG |
| 1231 | * and the registers being closely associated. |
| 1232 | * |
| 1233 | * According to chipset errata, on the 965GM, MSI interrupts may |
Ville Syrjälä | e38c2da | 2017-06-26 23:30:51 +0300 | [diff] [blame] | 1234 | * be lost or delayed, and the feature was defeatured. MSI interrupts seem to
| 1235 | * get lost on g4x as well, and interrupt delivery seems to stay |
| 1236 | * properly dead afterwards. So we'll just disable them for all |
| 1237 | * pre-gen5 chipsets. |
Lucas De Marchi | 8a29c77 | 2018-05-23 11:04:35 -0700 | [diff] [blame] | 1238 | * |
| 1239 | * DP AUX and GMBUS IRQs on gen4 seem to be able to generate legacy
| 1240 | * interrupts even when in MSI mode. This results in spurious |
| 1241 | * interrupt warnings if the legacy irq no. is shared with another |
| 1242 | * device. The kernel then disables that interrupt source and so |
| 1243 | * prevents the other device from working properly. |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1244 | */ |
Ville Syrjälä | e38c2da | 2017-06-26 23:30:51 +0300 | [diff] [blame] | 1245 | if (INTEL_GEN(dev_priv) >= 5) { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1246 | if (pci_enable_msi(pdev) < 0) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1247 | DRM_DEBUG_DRIVER("can't enable MSI"); |
| 1248 | } |
| 1249 | |
Zhenyu Wang | 26f837e | 2017-01-13 10:46:09 +0800 | [diff] [blame] | 1250 | ret = intel_gvt_init(dev_priv); |
| 1251 | if (ret) |
Chris Wilson | 7ab87ed | 2018-07-10 15:38:21 +0100 | [diff] [blame] | 1252 | goto err_msi; |
| 1253 | |
| 1254 | intel_opregion_setup(dev_priv); |
Mahesh Kumar | cbfa59d | 2018-08-24 15:02:21 +0530 | [diff] [blame] | 1255 | /* |
| 1256 | * Fill the DRAM structure with the raw system memory bandwidth and
| 1257 | * DRAM info. This will be used for memory latency calculations.
| 1258 | */ |
| 1259 | intel_get_dram_info(dev_priv); |
| 1260 | |
Ville Syrjälä | c457d9c | 2019-05-24 18:36:14 +0300 | [diff] [blame] | 1261 | intel_bw_init_hw(dev_priv); |
Zhenyu Wang | 26f837e | 2017-01-13 10:46:09 +0800 | [diff] [blame] | 1262 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1263 | return 0; |
| 1264 | |
Chris Wilson | 7ab87ed | 2018-07-10 15:38:21 +0100 | [diff] [blame] | 1265 | err_msi: |
| 1266 | if (pdev->msi_enabled) |
| 1267 | pci_disable_msi(pdev); |
| 1268 | pm_qos_remove_request(&dev_priv->pm_qos); |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1269 | err_ggtt: |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1270 | i915_ggtt_driver_release(dev_priv); |
Chris Wilson | 9f172f6 | 2018-04-14 10:12:33 +0100 | [diff] [blame] | 1271 | err_perf: |
| 1272 | i915_perf_fini(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1273 | return ret; |
| 1274 | } |
| 1275 | |
| 1276 | /** |
Janusz Krzysztofik | 78dae1a | 2019-07-12 13:24:29 +0200 | [diff] [blame] | 1277 | * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1278 | * @dev_priv: device private |
| 1279 | */ |
Janusz Krzysztofik | 78dae1a | 2019-07-12 13:24:29 +0200 | [diff] [blame] | 1280 | static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1281 | { |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1282 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1283 | |
Lionel Landwerlin | 9f9b279 | 2017-10-27 15:59:31 +0100 | [diff] [blame] | 1284 | i915_perf_fini(dev_priv); |
| 1285 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1286 | if (pdev->msi_enabled) |
| 1287 | pci_disable_msi(pdev); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1288 | |
| 1289 | pm_qos_remove_request(&dev_priv->pm_qos); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1290 | } |
| 1291 | |
| 1292 | /** |
| 1293 | * i915_driver_register - register the driver with the rest of the system |
| 1294 | * @dev_priv: device private |
| 1295 | * |
| 1296 | * Perform any steps necessary to make the driver available via kernel |
| 1297 | * internal or userspace interfaces. |
| 1298 | */ |
| 1299 | static void i915_driver_register(struct drm_i915_private *dev_priv) |
| 1300 | { |
Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 1301 | struct drm_device *dev = &dev_priv->drm; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1302 | |
Chris Wilson | c29579d | 2019-08-06 13:42:59 +0100 | [diff] [blame] | 1303 | i915_gem_driver_register(dev_priv); |
Tvrtko Ursulin | b46a33e | 2017-11-21 18:18:45 +0000 | [diff] [blame] | 1304 | i915_pmu_register(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1305 | |
| 1306 | /* |
| 1307 | * Notify a valid surface after modesetting, |
| 1308 | * when running inside a VM. |
| 1309 | */ |
| 1310 | if (intel_vgpu_active(dev_priv)) |
| 1311 | I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); |
| 1312 | |
| 1313 | /* Reveal our presence to userspace */ |
| 1314 | if (drm_dev_register(dev, 0) == 0) { |
| 1315 | i915_debugfs_register(dev_priv); |
David Weinehall | 694c282 | 2016-08-22 13:32:43 +0300 | [diff] [blame] | 1316 | i915_setup_sysfs(dev_priv); |
Robert Bragg | 442b8c0 | 2016-11-07 19:49:53 +0000 | [diff] [blame] | 1317 | |
| 1318 | /* Depends on sysfs having been initialized */ |
| 1319 | i915_perf_register(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1320 | } else |
| 1321 | DRM_ERROR("Failed to register driver for userspace access!\n"); |
| 1322 | |
José Roberto de Souza | e1bf094 | 2018-11-30 15:20:47 -0800 | [diff] [blame] | 1323 | if (HAS_DISPLAY(dev_priv)) { |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1324 | /* Must be done after probing outputs */ |
| 1325 | intel_opregion_register(dev_priv); |
| 1326 | acpi_video_register(); |
| 1327 | } |
| 1328 | |
Lucas De Marchi | cf819ef | 2018-12-12 10:10:43 -0800 | [diff] [blame] | 1329 | if (IS_GEN(dev_priv, 5)) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1330 | intel_gpu_ips_init(dev_priv); |
| 1331 | |
Jerome Anand | eef5732 | 2017-01-25 04:27:49 +0530 | [diff] [blame] | 1332 | intel_audio_init(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1333 | |
| 1334 | /* |
| 1335 | * Some ports require correctly set-up hpd registers for detection to |
| 1336 | * work properly (leading to ghost connected connector status), e.g. VGA |
| 1337 | * on gm45. Hence we can only set up the initial fbdev config after hpd |
| 1338 | * irqs are fully enabled. We do it last so that the async config |
| 1339 | * cannot run before the connectors are registered. |
| 1340 | */ |
| 1341 | intel_fbdev_initial_config_async(dev); |
Chris Wilson | 448aa91 | 2017-11-28 11:01:47 +0000 | [diff] [blame] | 1342 | |
| 1343 | /* |
| 1344 | * We need to coordinate the hotplugs with the asynchronous fbdev |
| 1345 | * configuration, for which we use the fbdev->async_cookie. |
| 1346 | */ |
José Roberto de Souza | e1bf094 | 2018-11-30 15:20:47 -0800 | [diff] [blame] | 1347 | if (HAS_DISPLAY(dev_priv)) |
Chris Wilson | 448aa91 | 2017-11-28 11:01:47 +0000 | [diff] [blame] | 1348 | drm_kms_helper_poll_init(dev); |
Chris Wilson | 07d8057 | 2018-08-16 15:37:56 +0300 | [diff] [blame] | 1349 | |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1350 | intel_power_domains_enable(dev_priv); |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1351 | intel_runtime_pm_enable(&dev_priv->runtime_pm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1352 | } |
| 1353 | |
| 1354 | /** |
| 1355 | * i915_driver_unregister - clean up the registration done in i915_driver_register()
| 1356 | * @dev_priv: device private |
| 1357 | */ |
| 1358 | static void i915_driver_unregister(struct drm_i915_private *dev_priv) |
| 1359 | { |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1360 | intel_runtime_pm_disable(&dev_priv->runtime_pm); |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1361 | intel_power_domains_disable(dev_priv); |
Chris Wilson | 07d8057 | 2018-08-16 15:37:56 +0300 | [diff] [blame] | 1362 | |
Daniel Vetter | 4f256d8 | 2017-07-15 00:46:55 +0200 | [diff] [blame] | 1363 | intel_fbdev_unregister(dev_priv); |
Jerome Anand | eef5732 | 2017-01-25 04:27:49 +0530 | [diff] [blame] | 1364 | intel_audio_deinit(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1365 | |
Chris Wilson | 448aa91 | 2017-11-28 11:01:47 +0000 | [diff] [blame] | 1366 | /* |
| 1367 | * After flushing the fbdev (incl. a late async config which will |
| 1368 | * have delayed queuing of a hotplug event), then flush the hotplug |
| 1369 | * events. |
| 1370 | */ |
| 1371 | drm_kms_helper_poll_fini(&dev_priv->drm); |
| 1372 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1373 | intel_gpu_ips_teardown(); |
| 1374 | acpi_video_unregister(); |
| 1375 | intel_opregion_unregister(dev_priv); |
| 1376 | |
Robert Bragg | 442b8c0 | 2016-11-07 19:49:53 +0000 | [diff] [blame] | 1377 | i915_perf_unregister(dev_priv); |
Tvrtko Ursulin | b46a33e | 2017-11-21 18:18:45 +0000 | [diff] [blame] | 1378 | i915_pmu_unregister(dev_priv); |
Robert Bragg | 442b8c0 | 2016-11-07 19:49:53 +0000 | [diff] [blame] | 1379 | |
David Weinehall | 694c282 | 2016-08-22 13:32:43 +0300 | [diff] [blame] | 1380 | i915_teardown_sysfs(dev_priv); |
Janusz Krzysztofik | d69990e | 2019-04-05 15:02:34 +0200 | [diff] [blame] | 1381 | drm_dev_unplug(&dev_priv->drm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1382 | |
Chris Wilson | c29579d | 2019-08-06 13:42:59 +0100 | [diff] [blame] | 1383 | i915_gem_driver_unregister(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1384 | } |
| 1385 | |
Michal Wajdeczko | 27d558a | 2017-12-21 21:57:35 +0000 | [diff] [blame] | 1386 | static void i915_welcome_messages(struct drm_i915_private *dev_priv) |
| 1387 | { |
| 1388 | if (drm_debug & DRM_UT_DRIVER) { |
| 1389 | struct drm_printer p = drm_debug_printer("i915 device info:"); |
| 1390 | |
Tvrtko Ursulin | 805446c | 2019-03-27 14:23:28 +0000 | [diff] [blame] | 1391 | drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", |
Jani Nikula | 1787a98 | 2018-12-31 16:56:45 +0200 | [diff] [blame] | 1392 | INTEL_DEVID(dev_priv), |
| 1393 | INTEL_REVID(dev_priv), |
| 1394 | intel_platform_name(INTEL_INFO(dev_priv)->platform), |
Tvrtko Ursulin | 805446c | 2019-03-27 14:23:28 +0000 | [diff] [blame] | 1395 | intel_subplatform(RUNTIME_INFO(dev_priv), |
| 1396 | INTEL_INFO(dev_priv)->platform), |
Jani Nikula | 1787a98 | 2018-12-31 16:56:45 +0200 | [diff] [blame] | 1397 | INTEL_GEN(dev_priv)); |
| 1398 | |
| 1399 | intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p); |
Jani Nikula | 0258404 | 2018-12-31 16:56:41 +0200 | [diff] [blame] | 1400 | intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); |
Michal Wajdeczko | 27d558a | 2017-12-21 21:57:35 +0000 | [diff] [blame] | 1401 | } |
| 1402 | |
| 1403 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) |
| 1404 | DRM_INFO("DRM_I915_DEBUG enabled\n"); |
| 1405 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
| 1406 | DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); |
Imre Deak | 6dfc4a8 | 2018-08-16 22:34:14 +0300 | [diff] [blame] | 1407 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) |
| 1408 | DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n"); |
Michal Wajdeczko | 27d558a | 2017-12-21 21:57:35 +0000 | [diff] [blame] | 1409 | } |
| 1410 | |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1411 | static struct drm_i915_private * |
| 1412 | i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 1413 | { |
| 1414 | const struct intel_device_info *match_info = |
| 1415 | (struct intel_device_info *)ent->driver_data; |
| 1416 | struct intel_device_info *device_info; |
| 1417 | struct drm_i915_private *i915; |
Andi Shyti | 2ddcc98 | 2018-10-02 12:20:47 +0300 | [diff] [blame] | 1418 | int err; |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1419 | |
| 1420 | i915 = kzalloc(sizeof(*i915), GFP_KERNEL); |
| 1421 | if (!i915) |
Andi Shyti | 2ddcc98 | 2018-10-02 12:20:47 +0300 | [diff] [blame] | 1422 | return ERR_PTR(-ENOMEM); |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1423 | |
Andi Shyti | 2ddcc98 | 2018-10-02 12:20:47 +0300 | [diff] [blame] | 1424 | err = drm_dev_init(&i915->drm, &driver, &pdev->dev); |
| 1425 | if (err) { |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1426 | kfree(i915); |
Andi Shyti | 2ddcc98 | 2018-10-02 12:20:47 +0300 | [diff] [blame] | 1427 | return ERR_PTR(err); |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1428 | } |
| 1429 | |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1430 | i915->drm.dev_private = i915; |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1431 | |
| 1432 | i915->drm.pdev = pdev; |
| 1433 | pci_set_drvdata(pdev, i915); |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1434 | |
| 1435 | /* Set up the write-once "constant" device info */
| 1436 | device_info = mkwrite_device_info(i915); |
| 1437 | memcpy(device_info, match_info, sizeof(*device_info)); |
Jani Nikula | 0258404 | 2018-12-31 16:56:41 +0200 | [diff] [blame] | 1438 | RUNTIME_INFO(i915)->device_id = pdev->device; |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1439 | |
Chris Wilson | 74f6e18 | 2018-09-26 11:47:07 +0100 | [diff] [blame] | 1440 | BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1441 | |
| 1442 | return i915; |
| 1443 | } |
| 1444 | |
Chris Wilson | 31962ca | 2018-09-05 15:09:21 +0100 | [diff] [blame] | 1445 | static void i915_driver_destroy(struct drm_i915_private *i915) |
| 1446 | { |
| 1447 | struct pci_dev *pdev = i915->drm.pdev; |
| 1448 | |
| 1449 | drm_dev_fini(&i915->drm); |
| 1450 | kfree(i915); |
| 1451 | |
| 1452 | /* And make sure we never chase our dangling pointer from pci_dev */ |
| 1453 | pci_set_drvdata(pdev, NULL); |
| 1454 | } |
| 1455 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1456 | /** |
Janusz Krzysztofik | b01558e | 2019-07-12 13:24:26 +0200 | [diff] [blame] | 1457 | * i915_driver_probe - setup chip and create an initial config |
Joonas Lahtinen | d2ad3ae | 2016-11-10 15:36:34 +0200 | [diff] [blame] | 1458 | * @pdev: PCI device |
| 1459 | * @ent: matching PCI ID entry |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1460 | * |
Janusz Krzysztofik | b01558e | 2019-07-12 13:24:26 +0200 | [diff] [blame] | 1461 | * The driver probe routine has to do several things: |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1462 | * - drive output discovery via intel_modeset_init() |
| 1463 | * - initialize the memory manager |
| 1464 | * - allocate initial config memory |
| 1465 | * - setup the DRM framebuffer with the allocated memory |
| 1466 | */ |
Janusz Krzysztofik | b01558e | 2019-07-12 13:24:26 +0200 | [diff] [blame] | 1467 | int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1468 | { |
Maarten Lankhorst | 8d2b47d | 2017-02-02 08:41:42 +0100 | [diff] [blame] | 1469 | const struct intel_device_info *match_info = |
| 1470 | (struct intel_device_info *)ent->driver_data; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1471 | struct drm_i915_private *dev_priv; |
| 1472 | int ret; |
| 1473 | |
Chris Wilson | 55ac5a1 | 2018-09-05 15:09:20 +0100 | [diff] [blame] | 1474 | dev_priv = i915_driver_create(pdev, ent); |
Andi Shyti | 2ddcc98 | 2018-10-02 12:20:47 +0300 | [diff] [blame] | 1475 | if (IS_ERR(dev_priv)) |
| 1476 | return PTR_ERR(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1477 | |
Ville Syrjälä | 1feb64c | 2018-09-13 16:16:22 +0300 | [diff] [blame] | 1478 | /* Disable nuclear pageflip by default on pre-ILK */ |
| 1479 | if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) |
| 1480 | dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; |
| 1481 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1482 | ret = pci_enable_device(pdev); |
| 1483 | if (ret) |
Chris Wilson | cad3688 | 2017-02-10 16:35:21 +0000 | [diff] [blame] | 1484 | goto out_fini; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1485 | |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1486 | ret = i915_driver_early_probe(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1487 | if (ret < 0) |
| 1488 | goto out_pci_disable; |
| 1489 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1490 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1491 | |
Daniele Ceraolo Spurio | 9e138ea | 2019-06-19 18:00:21 -0700 | [diff] [blame] | 1492 | i915_detect_vgpu(dev_priv); |
| 1493 | |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1494 | ret = i915_driver_mmio_probe(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1495 | if (ret < 0) |
| 1496 | goto out_runtime_pm_put; |
| 1497 | |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1498 | ret = i915_driver_hw_probe(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1499 | if (ret < 0) |
| 1500 | goto out_cleanup_mmio; |
| 1501 | |
Janusz Krzysztofik | 0b61b8b | 2019-07-12 13:24:30 +0200 | [diff] [blame] | 1502 | ret = i915_driver_modeset_probe(&dev_priv->drm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1503 | if (ret < 0) |
Daniel Vetter | baf5438 | 2017-06-21 10:28:41 +0200 | [diff] [blame] | 1504 | goto out_cleanup_hw; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1505 | |
| 1506 | i915_driver_register(dev_priv); |
| 1507 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1508 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1509 | |
Michal Wajdeczko | 27d558a | 2017-12-21 21:57:35 +0000 | [diff] [blame] | 1510 | i915_welcome_messages(dev_priv); |
| 1511 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1512 | return 0; |
| 1513 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1514 | out_cleanup_hw: |
Janusz Krzysztofik | 78dae1a | 2019-07-12 13:24:29 +0200 | [diff] [blame] | 1515 | i915_driver_hw_remove(dev_priv); |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1516 | i915_ggtt_driver_release(dev_priv); |
Daniele Ceraolo Spurio | 19e0a8d | 2019-06-19 18:00:17 -0700 | [diff] [blame] | 1517 | |
| 1518 | /* Paranoia: make sure we have disabled everything before we exit. */ |
| 1519 | intel_sanitize_gt_powersave(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1520 | out_cleanup_mmio: |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1521 | i915_driver_mmio_release(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1522 | out_runtime_pm_put: |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1523 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1524 | i915_driver_late_release(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1525 | out_pci_disable: |
| 1526 | pci_disable_device(pdev); |
Chris Wilson | cad3688 | 2017-02-10 16:35:21 +0000 | [diff] [blame] | 1527 | out_fini: |
Janusz Krzysztofik | f2db53f | 2019-07-12 13:24:27 +0200 | [diff] [blame] | 1528 | i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret); |
Chris Wilson | 31962ca | 2018-09-05 15:09:21 +0100 | [diff] [blame] | 1529 | i915_driver_destroy(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1530 | return ret; |
| 1531 | } |
| 1532 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1533 | void i915_driver_remove(struct drm_i915_private *i915) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1534 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1535 | struct pci_dev *pdev = i915->drm.pdev; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1536 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1537 | disable_rpm_wakeref_asserts(&i915->runtime_pm); |
Chris Wilson | 07d8057 | 2018-08-16 15:37:56 +0300 | [diff] [blame] | 1538 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1539 | i915_driver_unregister(i915); |
Daniel Vetter | 99c539b | 2017-07-15 00:46:56 +0200 | [diff] [blame] | 1540 | |
Janusz Krzysztofik | 141f376 | 2019-04-06 11:40:34 +0100 | [diff] [blame] | 1541 | /* |
| 1542 | * After unregistering the device to prevent any new users, cancel |
| 1543 | * all in-flight requests so that we can quickly unbind the active |
| 1544 | * resources. |
| 1545 | */ |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1546 | intel_gt_set_wedged(&i915->gt); |
Janusz Krzysztofik | 141f376 | 2019-04-06 11:40:34 +0100 | [diff] [blame] | 1547 | |
Chris Wilson | 4a8ab5e | 2019-01-14 14:21:29 +0000 | [diff] [blame] | 1548 | /* Flush any external code that still may be under the RCU lock */ |
| 1549 | synchronize_rcu(); |
| 1550 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1551 | i915_gem_suspend(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1552 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1553 | drm_atomic_helper_shutdown(&i915->drm); |
Maarten Lankhorst | a667fb4 | 2016-12-15 15:29:44 +0100 | [diff] [blame] | 1554 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1555 | intel_gvt_driver_remove(i915); |
Zhenyu Wang | 26f837e | 2017-01-13 10:46:09 +0800 | [diff] [blame] | 1556 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1557 | intel_modeset_driver_remove(&i915->drm); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1558 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1559 | intel_bios_driver_remove(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1560 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1561 | vga_switcheroo_unregister_client(pdev); |
| 1562 | vga_client_register(pdev, NULL, NULL, NULL); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1563 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1564 | intel_csr_ucode_fini(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1565 | |
| 1566 | /* Free error state after interrupts are fully disabled. */ |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1567 | cancel_delayed_work_sync(&i915->gt.hangcheck.work); |
| 1568 | i915_reset_error_state(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1569 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1570 | i915_gem_driver_remove(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1571 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1572 | intel_power_domains_driver_remove(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1573 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1574 | i915_driver_hw_remove(i915); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1575 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1576 | enable_rpm_wakeref_asserts(&i915->runtime_pm); |
Chris Wilson | cad3688 | 2017-02-10 16:35:21 +0000 | [diff] [blame] | 1577 | } |
| 1578 | |
| 1579 | static void i915_driver_release(struct drm_device *dev) |
| 1580 | { |
| 1581 | struct drm_i915_private *dev_priv = to_i915(dev); |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1582 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1583 | |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1584 | disable_rpm_wakeref_asserts(rpm); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1585 | |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1586 | i915_gem_driver_release(dev_priv); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1587 | |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1588 | i915_ggtt_driver_release(dev_priv); |
Daniele Ceraolo Spurio | 19e0a8d | 2019-06-19 18:00:17 -0700 | [diff] [blame] | 1589 | |
| 1590 | /* Paranoia: make sure we have disabled everything before we exit. */ |
| 1591 | intel_sanitize_gt_powersave(dev_priv); |
| 1592 | |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1593 | i915_driver_mmio_release(dev_priv); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1594 | |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1595 | enable_rpm_wakeref_asserts(rpm); |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1596 | intel_runtime_pm_driver_release(rpm); |
Janusz Krzysztofik | 47bc28d | 2019-05-30 15:31:05 +0200 | [diff] [blame] | 1597 | |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1598 | i915_driver_late_release(dev_priv); |
Chris Wilson | 31962ca | 2018-09-05 15:09:21 +0100 | [diff] [blame] | 1599 | i915_driver_destroy(dev_priv); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1600 | } |
| 1601 | |
| 1602 | static int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
| 1603 | { |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1604 | struct drm_i915_private *i915 = to_i915(dev); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1605 | int ret; |
| 1606 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1607 | ret = i915_gem_open(i915, file); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1608 | if (ret) |
| 1609 | return ret; |
| 1610 | |
| 1611 | return 0; |
| 1612 | } |
| 1613 | |
| 1614 | /** |
| 1615 | * i915_driver_lastclose - clean up after all DRM clients have exited |
| 1616 | * @dev: DRM device |
| 1617 | * |
| 1618 | * Take care of cleaning up after all DRM clients have exited. In the |
| 1619 | * mode setting case, we want to restore the kernel's initial mode (just |
| 1620 | * in case the last client left us in a bad state). |
| 1621 | * |
| 1622 | * Additionally, in the non-mode setting case, we'll tear down the GTT |
| 1623 | * and DMA structures, since the kernel won't be using them, and clean
| 1624 | * up any GEM state. |
| 1625 | */ |
| 1626 | static void i915_driver_lastclose(struct drm_device *dev) |
| 1627 | { |
| 1628 | intel_fbdev_restore_mode(dev); |
| 1629 | vga_switcheroo_process_delayed_switch(); |
| 1630 | } |
| 1631 | |
Daniel Vetter | 7d2ec88 | 2017-03-08 15:12:45 +0100 | [diff] [blame] | 1632 | static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1633 | { |
Daniel Vetter | 7d2ec88 | 2017-03-08 15:12:45 +0100 | [diff] [blame] | 1634 | struct drm_i915_file_private *file_priv = file->driver_priv; |
| 1635 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1636 | mutex_lock(&dev->struct_mutex); |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 1637 | i915_gem_context_close(file); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1638 | i915_gem_release(dev, file); |
| 1639 | mutex_unlock(&dev->struct_mutex); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1640 | |
| 1641 | kfree(file_priv); |
Chris Wilson | 515b8b7 | 2019-08-02 22:21:37 +0100 | [diff] [blame] | 1642 | |
| 1643 | /* Catch up with all the deferred frees from "this" client */ |
| 1644 | i915_gem_flush_free_objects(to_i915(dev)); |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 1645 | } |
| 1646 | |
Imre Deak | 07f9cd0 | 2014-08-18 14:42:45 +0300 | [diff] [blame] | 1647 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
| 1648 | { |
Chris Wilson | 91c8a32 | 2016-07-05 10:40:23 +0100 | [diff] [blame] | 1649 | struct drm_device *dev = &dev_priv->drm; |
Jani Nikula | 19c8054 | 2015-12-16 12:48:16 +0200 | [diff] [blame] | 1650 | struct intel_encoder *encoder; |
Imre Deak | 07f9cd0 | 2014-08-18 14:42:45 +0300 | [diff] [blame] | 1651 | |
| 1652 | drm_modeset_lock_all(dev); |
Jani Nikula | 19c8054 | 2015-12-16 12:48:16 +0200 | [diff] [blame] | 1653 | for_each_intel_encoder(dev, encoder) |
| 1654 | if (encoder->suspend) |
| 1655 | encoder->suspend(encoder); |
Imre Deak | 07f9cd0 | 2014-08-18 14:42:45 +0300 | [diff] [blame] | 1656 | drm_modeset_unlock_all(dev); |
| 1657 | } |
| 1658 | |
Paulo Zanoni | 1a5df18 | 2014-10-27 17:54:32 -0200 | [diff] [blame] | 1659 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
| 1660 | bool rpm_resume); |
Imre Deak | 507e126 | 2016-04-20 20:27:54 +0300 | [diff] [blame] | 1661 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
Suketu Shah | f75a198 | 2015-04-16 14:22:11 +0530 | [diff] [blame] | 1662 | |
Imre Deak | bc87229 | 2015-11-18 17:32:30 +0200 | [diff] [blame] | 1663 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
| 1664 | { |
| 1665 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) |
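	/*
	 * Suspend-to-idle presumably reports ACPI_STATE_S0 as the
	 * target state, which compares below ACPI_STATE_S3 here.
	 */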
| 1666 | if (acpi_target_system_state() < ACPI_STATE_S3) |
| 1667 | return true; |
| 1668 | #endif |
| 1669 | return false; |
| 1670 | } |
Sagar Kamble | ebc3282 | 2014-08-13 23:07:05 +0530 | [diff] [blame] | 1671 | |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1672 | static int i915_drm_prepare(struct drm_device *dev) |
| 1673 | { |
| 1674 | struct drm_i915_private *i915 = to_i915(dev); |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1675 | |
| 1676 | /* |
| 1677 | * NB intel_display_suspend() may issue new requests after we've |
| 1678 | * ostensibly marked the GPU as ready-to-sleep here. We need to |
| 1679 | * split out that work and pull it forward so that after that point,
| 1680 | * the GPU is not woken again. |
| 1681 | */ |
Chris Wilson | 5861b01 | 2019-03-08 09:36:54 +0000 | [diff] [blame] | 1682 | i915_gem_suspend(i915); |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1683 | |
Chris Wilson | 5861b01 | 2019-03-08 09:36:54 +0000 | [diff] [blame] | 1684 | return 0; |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1685 | } |
| 1686 | |
Imre Deak | 5e365c3 | 2014-10-23 19:23:25 +0300 | [diff] [blame] | 1687 | static int i915_drm_suspend(struct drm_device *dev) |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1688 | { |
Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 1689 | struct drm_i915_private *dev_priv = to_i915(dev); |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1690 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Jesse Barnes | e5747e3 | 2014-06-12 08:35:47 -0700 | [diff] [blame] | 1691 | pci_power_t opregion_target_state; |
Rafael J. Wysocki | 61caf87 | 2010-02-18 23:06:27 +0100 | [diff] [blame] | 1692 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1693 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1694 | |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1695 | /* We do a lot of poking in a lot of registers; make sure they work
| 1696 | * properly. */ |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1697 | intel_power_domains_disable(dev_priv); |
Paulo Zanoni | cb10799 | 2013-01-25 16:59:15 -0200 | [diff] [blame] | 1698 | |
Dave Airlie | 5bcf719 | 2010-12-07 09:20:40 +1000 | [diff] [blame] | 1699 | drm_kms_helper_poll_disable(dev); |
| 1700 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1701 | pci_save_state(pdev); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1702 | |
Maarten Lankhorst | 6b72d48 | 2015-06-01 12:49:47 +0200 | [diff] [blame] | 1703 | intel_display_suspend(dev); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1704 | |
Ville Syrjälä | 1a4313d | 2018-07-05 19:43:52 +0300 | [diff] [blame] | 1705 | intel_dp_mst_suspend(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1706 | |
| 1707 | intel_runtime_pm_disable_interrupts(dev_priv); |
| 1708 | intel_hpd_cancel_work(dev_priv); |
| 1709 | |
| 1710 | intel_suspend_encoders(dev_priv); |
| 1711 | |
Ville Syrjälä | 712bf36 | 2016-10-31 22:37:23 +0200 | [diff] [blame] | 1712 | intel_suspend_hw(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1713 | |
Tvrtko Ursulin | 275a991 | 2016-11-16 08:55:34 +0000 | [diff] [blame] | 1714 | i915_gem_suspend_gtt_mappings(dev_priv); |
Ben Widawsky | 828c790 | 2013-10-16 09:21:30 -0700 | [diff] [blame] | 1715 | |
Tvrtko Ursulin | af6dc74 | 2016-12-01 14:16:44 +0000 | [diff] [blame] | 1716 | i915_save_state(dev_priv); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1717 | |
Imre Deak | bc87229 | 2015-11-18 17:32:30 +0200 | [diff] [blame] | 1718 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
Chris Wilson | a950adc | 2018-10-30 11:05:54 +0000 | [diff] [blame] | 1719 | intel_opregion_suspend(dev_priv, opregion_target_state); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1720 | |
Chris Wilson | 82e3b8c | 2014-08-13 13:09:46 +0100 | [diff] [blame] | 1721 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
Dave Airlie | 3fa016a | 2012-03-28 10:48:49 +0100 | [diff] [blame] | 1722 | |
Mika Kuoppala | 62d5d69 | 2014-02-25 17:11:28 +0200 | [diff] [blame] | 1723 | dev_priv->suspend_count++; |
| 1724 | |
Imre Deak | f74ed08 | 2016-04-18 14:48:21 +0300 | [diff] [blame] | 1725 | intel_csr_ucode_suspend(dev_priv); |
Imre Deak | f514c2d | 2015-10-28 23:59:06 +0200 | [diff] [blame] | 1726 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1727 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1728 | |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1729 | return 0; |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1730 | } |
| 1731 | |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1732 | static enum i915_drm_suspend_mode |
| 1733 | get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) |
| 1734 | { |
| 1735 | if (hibernate) |
| 1736 | return I915_DRM_SUSPEND_HIBERNATE; |
| 1737 | |
| 1738 | if (suspend_to_idle(dev_priv)) |
| 1739 | return I915_DRM_SUSPEND_IDLE; |
| 1740 | |
| 1741 | return I915_DRM_SUSPEND_MEM; |
| 1742 | } |
| 1743 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 1744 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1745 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 1746 | struct drm_i915_private *dev_priv = to_i915(dev); |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1747 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1748 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 1749 | int ret = 0; |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1750 | |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1751 | disable_rpm_wakeref_asserts(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1752 | |
Chris Wilson | ec92ad0 | 2018-05-31 09:22:46 +0100 | [diff] [blame] | 1753 | i915_gem_suspend_late(dev_priv); |
| 1754 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1755 | intel_uncore_suspend(&dev_priv->uncore); |
Imre Deak | 4c494a5 | 2016-10-13 14:34:06 +0300 | [diff] [blame] | 1756 | |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1757 | intel_power_domains_suspend(dev_priv, |
| 1758 | get_suspend_mode(dev_priv, hibernation)); |
Imre Deak | 73dfc22 | 2015-11-17 17:33:53 +0200 | [diff] [blame] | 1759 | |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 1760 | intel_display_power_suspend_late(dev_priv); |
| 1761 | |
| 1762 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
Imre Deak | 507e126 | 2016-04-20 20:27:54 +0300 | [diff] [blame] | 1763 | ret = vlv_suspend_complete(dev_priv); |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1764 | |
| 1765 | if (ret) { |
| 1766 | DRM_ERROR("Suspend complete failed: %d\n", ret); |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1767 | intel_power_domains_resume(dev_priv); |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1768 | |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1769 | goto out; |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1770 | } |
| 1771 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1772 | pci_disable_device(pdev); |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 1773 | /* |
Imre Deak | 5487557 | 2015-06-30 17:06:47 +0300 | [diff] [blame] | 1774 | * During hibernation on some platforms the BIOS may try to access |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 1775 | * the device even though it's already in D3 and hang the machine. So |
| 1776 | * leave the device in D0 on those platforms and hope the BIOS will |
Imre Deak | 5487557 | 2015-06-30 17:06:47 +0300 | [diff] [blame] | 1777 | * power down the device properly. The issue was seen on multiple old |
| 1778 | * GENs with different BIOS vendors, so having an explicit blacklist |
| 1779 | * is impractical; apply the workaround on everything pre-GEN6. The
| 1780 | * platforms where the issue was seen: |
| 1781 | * Lenovo Thinkpad X301, X61s, X60, T60, X41 |
| 1782 | * Fujitsu FSC S7110 |
| 1783 | * Acer Aspire 1830T |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 1784 | */ |
Tvrtko Ursulin | 514e1d6 | 2016-11-04 14:42:48 +0000 | [diff] [blame] | 1785 | if (!(hibernation && INTEL_GEN(dev_priv) < 6)) |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1786 | pci_set_power_state(pdev, PCI_D3hot); |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1787 | |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1788 | out: |
Daniele Ceraolo Spurio | 69c6635 | 2019-06-13 16:21:53 -0700 | [diff] [blame] | 1789 | enable_rpm_wakeref_asserts(rpm); |
Daniele Ceraolo Spurio | 0a9b263 | 2019-08-09 07:31:16 +0100 | [diff] [blame] | 1790 | if (!dev_priv->uncore.user_forcewake_count) |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 1791 | intel_runtime_pm_driver_release(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1792 | |
| 1793 | return ret; |
Imre Deak | c3c09c9 | 2014-10-23 19:23:15 +0300 | [diff] [blame] | 1794 | } |
| 1795 | |
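| | /* |
| | * For orientation: the callers further down in this file pick the |
| | * hibernation behaviour of i915_drm_suspend_late() as follows: |
| | * |
| | *	i915_suspend_switcheroo() -> i915_drm_suspend_late(dev, false) |
| | *	i915_pm_suspend_late()    -> i915_drm_suspend_late(dev, false)  (S3) |
| | *	i915_pm_poweroff_late()   -> i915_drm_suspend_late(dev, true)   (S4 poweroff) |
| | *	i915_pm_freeze_late()     -> i915_drm_suspend_late(dev, true)   (S4 freeze) |
| | */ |
| | |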
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1796 | static int |
| 1797 | i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1798 | { |
| 1799 | int error; |
| 1800 | |
Imre Deak | 0b14cbd | 2014-09-10 18:16:55 +0300 | [diff] [blame] | 1801 | if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
| 1802 | state.event != PM_EVENT_FREEZE)) |
| 1803 | return -EINVAL; |
Dave Airlie | 5bcf719 | 2010-12-07 09:20:40 +1000 | [diff] [blame] | 1804 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1805 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Dave Airlie | 5bcf719 | 2010-12-07 09:20:40 +1000 | [diff] [blame] | 1806 | return 0; |
Chris Wilson | 6eecba3 | 2010-09-08 09:45:11 +0100 | [diff] [blame] | 1807 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1808 | error = i915_drm_suspend(&i915->drm); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1809 | if (error) |
| 1810 | return error; |
Jesse Barnes | ba8bbcf | 2007-11-22 14:14:14 +1000 | [diff] [blame] | 1811 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1812 | return i915_drm_suspend_late(&i915->drm, false); |
Jesse Barnes | ba8bbcf | 2007-11-22 14:14:14 +1000 | [diff] [blame] | 1813 | } |
| 1814 | |
Imre Deak | 5e365c3 | 2014-10-23 19:23:25 +0300 | [diff] [blame] | 1815 | static int i915_drm_resume(struct drm_device *dev) |
Jesse Barnes | ba8bbcf | 2007-11-22 14:14:14 +1000 | [diff] [blame] | 1816 | { |
Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 1817 | struct drm_i915_private *dev_priv = to_i915(dev); |
Ville Syrjälä | ac840ae | 2016-05-06 21:35:55 +0300 | [diff] [blame] | 1818 | int ret; |
Matthew Garrett | 8ee1c3d | 2008-08-05 19:37:25 +0100 | [diff] [blame] | 1819 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1820 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Chris Wilson | abc80ab | 2016-08-24 10:27:01 +0100 | [diff] [blame] | 1821 | intel_sanitize_gt_powersave(dev_priv); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1822 | |
Chris Wilson | 1288786 | 2018-06-14 10:40:59 +0100 | [diff] [blame] | 1823 | i915_gem_sanitize(dev_priv); |
| 1824 | |
Chris Wilson | 97d6d7a | 2016-08-04 07:52:22 +0100 | [diff] [blame] | 1825 | ret = i915_ggtt_enable_hw(dev_priv); |
Ville Syrjälä | ac840ae | 2016-05-06 21:35:55 +0300 | [diff] [blame] | 1826 | if (ret) |
| 1827 | DRM_ERROR("failed to re-enable GGTT\n"); |
| 1828 | |
Imre Deak | f74ed08 | 2016-04-18 14:48:21 +0300 | [diff] [blame] | 1829 | intel_csr_ucode_resume(dev_priv); |
| 1830 | |
Tvrtko Ursulin | af6dc74 | 2016-12-01 14:16:44 +0000 | [diff] [blame] | 1831 | i915_restore_state(dev_priv); |
Imre Deak | 8090ba8 | 2016-08-10 14:07:33 +0300 | [diff] [blame] | 1832 | intel_pps_unlock_regs_wa(dev_priv); |
Rafael J. Wysocki | 61caf87 | 2010-02-18 23:06:27 +0100 | [diff] [blame] | 1833 | |
Ander Conselvan de Oliveira | c39055b | 2016-11-23 16:21:44 +0200 | [diff] [blame] | 1834 | intel_init_pch_refclk(dev_priv); |
Chris Wilson | 1833b13 | 2012-05-09 11:56:28 +0100 | [diff] [blame] | 1835 | |
Peter Antoine | 364aece | 2015-05-11 08:50:45 +0100 | [diff] [blame] | 1836 | /* |
| 1837 | * Interrupts have to be enabled before any batches are run. If not, the |
| 1838 | * GPU will hang. i915_gem_init_hw() will initiate batches to |
| 1839 | * update/restore the context. |
| 1840 | * |
Imre Deak | 908764f | 2016-11-29 21:40:29 +0200 | [diff] [blame] | 1841 | * drm_mode_config_reset() needs AUX interrupts. |
| 1842 | * |
Peter Antoine | 364aece | 2015-05-11 08:50:45 +0100 | [diff] [blame] | 1843 | * Modeset enabling in intel_modeset_init_hw() also needs working |
| 1844 | * interrupts. |
| 1845 | */ |
| 1846 | intel_runtime_pm_enable_interrupts(dev_priv); |
| 1847 | |
Imre Deak | 908764f | 2016-11-29 21:40:29 +0200 | [diff] [blame] | 1848 | drm_mode_config_reset(dev); |
| 1849 | |
Chris Wilson | 37cd330 | 2017-11-12 11:27:38 +0000 | [diff] [blame] | 1850 | i915_gem_resume(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1851 | |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1852 | intel_modeset_init_hw(dev); |
Ville Syrjälä | 675f7ff | 2017-11-16 18:02:15 +0200 | [diff] [blame] | 1853 | intel_init_clock_gating(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1854 | |
| 1855 | spin_lock_irq(&dev_priv->irq_lock); |
| 1856 | if (dev_priv->display.hpd_irq_setup) |
Tvrtko Ursulin | 91d1425 | 2016-05-06 14:48:28 +0100 | [diff] [blame] | 1857 | dev_priv->display.hpd_irq_setup(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1858 | spin_unlock_irq(&dev_priv->irq_lock); |
| 1859 | |
Ville Syrjälä | 1a4313d | 2018-07-05 19:43:52 +0300 | [diff] [blame] | 1860 | intel_dp_mst_resume(dev_priv); |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1861 | |
Lyude | a16b765 | 2016-03-11 10:57:01 -0500 | [diff] [blame] | 1862 | intel_display_resume(dev); |
| 1863 | |
Lyude | e0b7006 | 2016-11-01 21:06:30 -0400 | [diff] [blame] | 1864 | drm_kms_helper_poll_enable(dev); |
| 1865 | |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1866 | /* |
| 1867 | * ... but also need to make sure that hotplug processing |
| 1868 | * doesn't cause havoc. Like in the driver load code we don't |
Gwan-gyeong Mun | c444ad7 | 2018-08-03 19:41:50 +0300 | [diff] [blame] | 1869 | * bother with the tiny race here where we might lose hotplug |
Daniel Vetter | d581893 | 2015-02-23 12:03:26 +0100 | [diff] [blame] | 1870 | * notifications. |
| 1871 | */ |
| 1872 | intel_hpd_init(dev_priv); |
Jesse Barnes | 1daed3f | 2011-01-05 12:01:25 -0800 | [diff] [blame] | 1873 | |
Chris Wilson | a950adc | 2018-10-30 11:05:54 +0000 | [diff] [blame] | 1874 | intel_opregion_resume(dev_priv); |
Chris Wilson | 44834a6 | 2010-08-19 16:09:23 +0100 | [diff] [blame] | 1875 | |
Chris Wilson | 82e3b8c | 2014-08-13 13:09:46 +0100 | [diff] [blame] | 1876 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
Jesse Barnes | 073f34d | 2012-11-02 11:13:59 -0700 | [diff] [blame] | 1877 | |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1878 | intel_power_domains_enable(dev_priv); |
| 1879 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1880 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1881 | |
Chris Wilson | 074c6ad | 2014-04-09 09:19:43 +0100 | [diff] [blame] | 1882 | return 0; |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1883 | } |
| 1884 | |
Imre Deak | 5e365c3 | 2014-10-23 19:23:25 +0300 | [diff] [blame] | 1885 | static int i915_drm_resume_early(struct drm_device *dev) |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1886 | { |
Chris Wilson | fac5e23 | 2016-07-04 11:34:36 +0100 | [diff] [blame] | 1887 | struct drm_i915_private *dev_priv = to_i915(dev); |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1888 | struct pci_dev *pdev = dev_priv->drm.pdev; |
Imre Deak | 44410cd | 2016-04-18 14:45:54 +0300 | [diff] [blame] | 1889 | int ret; |
Imre Deak | 36d61e6 | 2014-10-23 19:23:24 +0300 | [diff] [blame] | 1890 | |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 1891 | /* |
| 1892 | * We have a resume ordering issue with the snd-hda driver also |
| 1893 | * requiring our device to be powered up. Due to the lack of a |
| 1894 | * parent/child relationship we currently solve this with an early |
| 1895 | * resume hook. |
| 1896 | * |
| 1897 | * FIXME: This should be solved with a special hdmi sink device or |
| 1898 | * similar so that power domains can be employed. |
| 1899 | */ |
Imre Deak | 44410cd | 2016-04-18 14:45:54 +0300 | [diff] [blame] | 1900 | |
| 1901 | /* |
| 1902 | * Note that we need to set the power state explicitly, since we |
| 1903 | * powered off the device during freeze and the PCI core won't power |
| 1904 | * it back up for us during thaw. Powering off the device during |
| 1905 | * freeze is not a hard requirement though, and during the |
| 1906 | * suspend/resume phases the PCI core makes sure we get here with the |
| 1907 | * device powered on. So in case we change our freeze logic and keep |
| 1908 | * the device powered we can also remove the following set power state |
| 1909 | * call. |
| 1910 | */ |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1911 | ret = pci_set_power_state(pdev, PCI_D0); |
Imre Deak | 44410cd | 2016-04-18 14:45:54 +0300 | [diff] [blame] | 1912 | if (ret) { |
| 1913 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1914 | return ret; |
Imre Deak | 44410cd | 2016-04-18 14:45:54 +0300 | [diff] [blame] | 1915 | } |
| 1916 | |
| 1917 | /* |
| 1918 | * Note that pci_enable_device() first enables any parent bridge |
| 1919 | * device and only then sets the power state for this device. The |
| 1920 | * bridge enabling is a nop though, since bridge devices are resumed |
| 1921 | * first. The order of enabling power and enabling the device is |
| 1922 | * imposed by the PCI core as described above, so here we preserve the |
| 1923 | * same order for the freeze/thaw phases. |
| 1924 | * |
| 1925 | * TODO: eventually we should remove pci_disable_device() / |
| 1926 | * pci_enable_device() from suspend/resume. Due to how they |
| 1927 | * depend on the device enable refcount we can't anyway depend on them |
| 1928 | * disabling/enabling the device. |
| 1929 | */ |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1930 | if (pci_enable_device(pdev)) |
| 1931 | return -EIO; |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1932 | |
David Weinehall | 52a05c3 | 2016-08-22 13:32:44 +0300 | [diff] [blame] | 1933 | pci_set_master(pdev); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1934 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1935 | disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1936 | |
Wayne Boyer | 666a453 | 2015-12-09 12:29:35 -0800 | [diff] [blame] | 1937 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
Paulo Zanoni | 1a5df18 | 2014-10-27 17:54:32 -0200 | [diff] [blame] | 1938 | ret = vlv_resume_prepare(dev_priv, false); |
Imre Deak | 36d61e6 | 2014-10-23 19:23:24 +0300 | [diff] [blame] | 1939 | if (ret) |
Damien Lespiau | ff0b187 | 2015-05-20 14:45:15 +0100 | [diff] [blame] | 1940 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
| 1941 | ret); |
Imre Deak | 36d61e6 | 2014-10-23 19:23:24 +0300 | [diff] [blame] | 1942 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 1943 | intel_uncore_resume_early(&dev_priv->uncore); |
| 1944 | |
Tvrtko Ursulin | eaf522f | 2019-06-21 08:07:44 +0100 | [diff] [blame] | 1945 | intel_gt_check_and_clear_faults(&dev_priv->gt); |
Paulo Zanoni | efee833 | 2014-10-27 17:54:33 -0200 | [diff] [blame] | 1946 | |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 1947 | intel_display_power_resume_early(dev_priv); |
Paulo Zanoni | efee833 | 2014-10-27 17:54:33 -0200 | [diff] [blame] | 1948 | |
Daniele Ceraolo Spurio | 19e0a8d | 2019-06-19 18:00:17 -0700 | [diff] [blame] | 1949 | intel_sanitize_gt_powersave(dev_priv); |
Imre Deak | bc87229 | 2015-11-18 17:32:30 +0200 | [diff] [blame] | 1950 | |
Imre Deak | 2cd9a68 | 2018-08-16 15:37:57 +0300 | [diff] [blame] | 1951 | intel_power_domains_resume(dev_priv); |
Imre Deak | bc87229 | 2015-11-18 17:32:30 +0200 | [diff] [blame] | 1952 | |
Chris Wilson | 0c91621 | 2019-06-25 14:01:10 +0100 | [diff] [blame] | 1953 | intel_gt_sanitize(&dev_priv->gt, true); |
Chris Wilson | 4fdd5b4 | 2018-06-16 21:25:34 +0100 | [diff] [blame] | 1954 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 1955 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
Imre Deak | 6e35e8a | 2016-04-18 10:04:19 +0300 | [diff] [blame] | 1956 | |
Imre Deak | 36d61e6 | 2014-10-23 19:23:24 +0300 | [diff] [blame] | 1957 | return ret; |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 1958 | } |
| 1959 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1960 | static int i915_resume_switcheroo(struct drm_i915_private *i915) |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 1961 | { |
Imre Deak | 50a0072 | 2014-10-23 19:23:17 +0300 | [diff] [blame] | 1962 | int ret; |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 1963 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1964 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Imre Deak | 097dd83 | 2014-10-23 19:23:19 +0300 | [diff] [blame] | 1965 | return 0; |
| 1966 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1967 | ret = i915_drm_resume_early(&i915->drm); |
Imre Deak | 50a0072 | 2014-10-23 19:23:17 +0300 | [diff] [blame] | 1968 | if (ret) |
| 1969 | return ret; |
| 1970 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1971 | return i915_drm_resume(&i915->drm); |
Imre Deak | 5a17514 | 2014-10-23 19:23:18 +0300 | [diff] [blame] | 1972 | } |
| 1973 | |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1974 | static int i915_pm_prepare(struct device *kdev) |
| 1975 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1976 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1977 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1978 | if (!i915) { |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1979 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
| 1980 | return -ENODEV; |
| 1981 | } |
| 1982 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1983 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1984 | return 0; |
| 1985 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1986 | return i915_drm_prepare(&i915->drm); |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 1987 | } |
| 1988 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 1989 | static int i915_pm_suspend(struct device *kdev) |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1990 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1991 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1992 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1993 | if (!i915) { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 1994 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 1995 | return -ENODEV; |
| 1996 | } |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 1997 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 1998 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Dave Airlie | 5bcf719 | 2010-12-07 09:20:40 +1000 | [diff] [blame] | 1999 | return 0; |
| 2000 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2001 | return i915_drm_suspend(&i915->drm); |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2002 | } |
| 2003 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2004 | static int i915_pm_suspend_late(struct device *kdev) |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2005 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2006 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2007 | |
| 2008 | /* |
Damien Lespiau | c965d995 | 2015-05-18 19:53:48 +0100 | [diff] [blame] | 2009 | * We have a suspend ordering issue with the snd-hda driver also |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2010 | * requiring our device to be powered up. Due to the lack of a |
| 2011 | * parent/child relationship we currently solve this with a late |
| 2012 | * suspend hook. |
| 2013 | * |
| 2014 | * FIXME: This should be solved with a special hdmi sink device or |
| 2015 | * similar so that power domains can be employed. |
| 2016 | */ |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2017 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2018 | return 0; |
Kristian Høgsberg | 112b715 | 2009-01-04 16:55:33 -0500 | [diff] [blame] | 2019 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2020 | return i915_drm_suspend_late(&i915->drm, false); |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 2021 | } |
| 2022 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2023 | static int i915_pm_poweroff_late(struct device *kdev) |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 2024 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2025 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 2026 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2027 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 2028 | return 0; |
| 2029 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2030 | return i915_drm_suspend_late(&i915->drm, true); |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 2031 | } |
| 2032 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2033 | static int i915_pm_resume_early(struct device *kdev) |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2034 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2035 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2036 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2037 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Imre Deak | 097dd83 | 2014-10-23 19:23:19 +0300 | [diff] [blame] | 2038 | return 0; |
| 2039 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2040 | return i915_drm_resume_early(&i915->drm); |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2041 | } |
| 2042 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2043 | static int i915_pm_resume(struct device *kdev) |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 2044 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2045 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Rafael J. Wysocki | 84b79f8 | 2010-02-07 21:48:24 +0100 | [diff] [blame] | 2046 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2047 | if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
Imre Deak | 097dd83 | 2014-10-23 19:23:19 +0300 | [diff] [blame] | 2048 | return 0; |
| 2049 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2050 | return i915_drm_resume(&i915->drm); |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 2051 | } |
| 2052 | |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2053 | /* freeze: before creating the hibernation_image */ |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2054 | static int i915_pm_freeze(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2055 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2056 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2057 | int ret; |
| 2058 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2059 | if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
| 2060 | ret = i915_drm_suspend(&i915->drm); |
Imre Deak | dd9f31c | 2017-08-16 17:46:07 +0300 | [diff] [blame] | 2061 | if (ret) |
| 2062 | return ret; |
| 2063 | } |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2064 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2065 | ret = i915_gem_freeze(i915); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 2066 | if (ret) |
| 2067 | return ret; |
| 2068 | |
| 2069 | return 0; |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2070 | } |
| 2071 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2072 | static int i915_pm_freeze_late(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2073 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2074 | struct drm_i915_private *i915 = kdev_to_i915(kdev); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2075 | int ret; |
| 2076 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2077 | if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
| 2078 | ret = i915_drm_suspend_late(&i915->drm, true); |
Imre Deak | dd9f31c | 2017-08-16 17:46:07 +0300 | [diff] [blame] | 2079 | if (ret) |
| 2080 | return ret; |
| 2081 | } |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2082 | |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2083 | ret = i915_gem_freeze_late(i915); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 2084 | if (ret) |
| 2085 | return ret; |
| 2086 | |
| 2087 | return 0; |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2088 | } |
| 2089 | |
| 2090 | /* thaw: called after creating the hibernation image, but before turning off. */ |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2091 | static int i915_pm_thaw_early(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2092 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2093 | return i915_pm_resume_early(kdev); |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2094 | } |
| 2095 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2096 | static int i915_pm_thaw(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2097 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2098 | return i915_pm_resume(kdev); |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2099 | } |
| 2100 | |
| 2101 | /* restore: called after loading the hibernation image. */ |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2102 | static int i915_pm_restore_early(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2103 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2104 | return i915_pm_resume_early(kdev); |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2105 | } |
| 2106 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2107 | static int i915_pm_restore(struct device *kdev) |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2108 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2109 | return i915_pm_resume(kdev); |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2110 | } |
| 2111 | |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2112 | /* |
| 2113 | * Save all Gunit registers that may be lost after a D3 and a subsequent |
| 2114 | * S0i[R123] transition. The list of registers needing a save/restore is |
| 2115 | * defined in the VLV2_S0IXRegs document. This document marks all Gunit |
| 2116 | * registers in the following way: |
| 2117 | * - Driver: saved/restored by the driver |
| 2118 | * - Punit : saved/restored by the Punit firmware |
| 2119 | * - No, w/o marking: no need to save/restore, since the register is R/O or |
| 2120 | * used internally by the HW in a way that doesn't depend on |
| 2121 | * keeping the content across a suspend/resume. |
| 2122 | * - Debug : used for debugging |
| 2123 | * |
| 2124 | * We save/restore all registers marked with 'Driver', with the following |
| 2125 | * exceptions: |
| 2126 | * - Registers out of use, including registers marked with 'Debug'. |
| 2127 | * These have no effect on the driver's operation, so we don't save/restore |
| 2128 | * them to reduce the overhead. |
| 2129 | * - Registers that are fully setup by an initialization function called from |
| 2130 | * the resume path. For example many clock gating and RPS/RC6 registers. |
| 2131 | * - Registers that provide the right functionality with their reset defaults. |
| 2132 | * |
| 2133 | * TODO: Except for registers that, based on the above 3 criteria, can be safely |
| 2134 | * ignored, we save/restore all others, practically treating the HW context as |
| 2135 | * a black-box for the driver. Further investigation is needed to reduce the |
| 2136 | * saved/restored registers even further, by following the same 3 criteria. |
| 2137 | */ |
| 2138 | static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
| 2139 | { |
| 2140 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
| 2141 | int i; |
| 2142 | |
| 2143 | /* GAM 0x4000-0x4770 */ |
| 2144 | s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); |
| 2145 | s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); |
| 2146 | s->arb_mode = I915_READ(ARB_MODE); |
| 2147 | s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); |
| 2148 | s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); |
| 2149 | |
| 2150 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
Ville Syrjälä | 22dfe79 | 2015-09-18 20:03:16 +0300 | [diff] [blame] | 2151 | s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2152 | |
| 2153 | s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
Imre Deak | b5f1c97 | 2015-04-15 16:52:30 -0700 | [diff] [blame] | 2154 | s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2155 | |
| 2156 | s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); |
| 2157 | s->ecochk = I915_READ(GAM_ECOCHK); |
| 2158 | s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); |
| 2159 | s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); |
| 2160 | |
| 2161 | s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); |
| 2162 | |
| 2163 | /* MBC 0x9024-0x91D0, 0x8500 */ |
| 2164 | s->g3dctl = I915_READ(VLV_G3DCTL); |
| 2165 | s->gsckgctl = I915_READ(VLV_GSCKGCTL); |
| 2166 | s->mbctl = I915_READ(GEN6_MBCTL); |
| 2167 | |
| 2168 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ |
| 2169 | s->ucgctl1 = I915_READ(GEN6_UCGCTL1); |
| 2170 | s->ucgctl3 = I915_READ(GEN6_UCGCTL3); |
| 2171 | s->rcgctl1 = I915_READ(GEN6_RCGCTL1); |
| 2172 | s->rcgctl2 = I915_READ(GEN6_RCGCTL2); |
| 2173 | s->rstctl = I915_READ(GEN6_RSTCTL); |
| 2174 | s->misccpctl = I915_READ(GEN7_MISCCPCTL); |
| 2175 | |
| 2176 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
| 2177 | s->gfxpause = I915_READ(GEN6_GFXPAUSE); |
| 2178 | s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); |
| 2179 | s->rpdeuc = I915_READ(GEN6_RPDEUC); |
| 2180 | s->ecobus = I915_READ(ECOBUS); |
| 2181 | s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); |
| 2182 | s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); |
| 2183 | s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); |
| 2184 | s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); |
| 2185 | s->rcedata = I915_READ(VLV_RCEDATA); |
| 2186 | s->spare2gh = I915_READ(VLV_SPAREG2H); |
| 2187 | |
| 2188 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
| 2189 | s->gt_imr = I915_READ(GTIMR); |
| 2190 | s->gt_ier = I915_READ(GTIER); |
| 2191 | s->pm_imr = I915_READ(GEN6_PMIMR); |
| 2192 | s->pm_ier = I915_READ(GEN6_PMIER); |
| 2193 | |
| 2194 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
Ville Syrjälä | 22dfe79 | 2015-09-18 20:03:16 +0300 | [diff] [blame] | 2195 | s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2196 | |
| 2197 | /* GT SA CZ domain, 0x100000-0x138124 */ |
| 2198 | s->tilectl = I915_READ(TILECTL); |
| 2199 | s->gt_fifoctl = I915_READ(GTFIFOCTL); |
| 2200 | s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); |
| 2201 | s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
| 2202 | s->pmwgicz = I915_READ(VLV_PMWGICZ); |
| 2203 | |
| 2204 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
| 2205 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); |
| 2206 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); |
Jesse Barnes | 9c25210 | 2015-04-01 14:22:57 -0700 | [diff] [blame] | 2207 | s->pcbr = I915_READ(VLV_PCBR); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2208 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
| 2209 | |
| 2210 | /* |
| 2211 | * Not saving any of: |
| 2212 | * DFT, 0x9800-0x9EC0 |
| 2213 | * SARB, 0xB000-0xB1FC |
| 2214 | * GAC, 0x5208-0x524C, 0x14000-0x14C000 |
| 2215 | * PCI CFG |
| 2216 | */ |
| 2217 | } |
| 2218 | |
| 2219 | static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
| 2220 | { |
| 2221 | struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
| 2222 | u32 val; |
| 2223 | int i; |
| 2224 | |
| 2225 | /* GAM 0x4000-0x4770 */ |
| 2226 | I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); |
| 2227 | I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); |
| 2228 | I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); |
| 2229 | I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); |
| 2230 | I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); |
| 2231 | |
| 2232 | for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
Ville Syrjälä | 22dfe79 | 2015-09-18 20:03:16 +0300 | [diff] [blame] | 2233 | I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2234 | |
| 2235 | I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); |
Imre Deak | b5f1c97 | 2015-04-15 16:52:30 -0700 | [diff] [blame] | 2236 | I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2237 | |
| 2238 | I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); |
| 2239 | I915_WRITE(GAM_ECOCHK, s->ecochk); |
| 2240 | I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); |
| 2241 | I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); |
| 2242 | |
| 2243 | I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); |
| 2244 | |
| 2245 | /* MBC 0x9024-0x91D0, 0x8500 */ |
| 2246 | I915_WRITE(VLV_G3DCTL, s->g3dctl); |
| 2247 | I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); |
| 2248 | I915_WRITE(GEN6_MBCTL, s->mbctl); |
| 2249 | |
| 2250 | /* GCP 0x9400-0x9424, 0x8100-0x810C */ |
| 2251 | I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); |
| 2252 | I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); |
| 2253 | I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); |
| 2254 | I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); |
| 2255 | I915_WRITE(GEN6_RSTCTL, s->rstctl); |
| 2256 | I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); |
| 2257 | |
| 2258 | /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
| 2259 | I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); |
| 2260 | I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); |
| 2261 | I915_WRITE(GEN6_RPDEUC, s->rpdeuc); |
| 2262 | I915_WRITE(ECOBUS, s->ecobus); |
| 2263 | I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); |
| 2264 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); |
| 2265 | I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); |
| 2266 | I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); |
| 2267 | I915_WRITE(VLV_RCEDATA, s->rcedata); |
| 2268 | I915_WRITE(VLV_SPAREG2H, s->spare2gh); |
| 2269 | |
| 2270 | /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
| 2271 | I915_WRITE(GTIMR, s->gt_imr); |
| 2272 | I915_WRITE(GTIER, s->gt_ier); |
| 2273 | I915_WRITE(GEN6_PMIMR, s->pm_imr); |
| 2274 | I915_WRITE(GEN6_PMIER, s->pm_ier); |
| 2275 | |
| 2276 | for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
Ville Syrjälä | 22dfe79 | 2015-09-18 20:03:16 +0300 | [diff] [blame] | 2277 | I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2278 | |
| 2279 | /* GT SA CZ domain, 0x100000-0x138124 */ |
| 2280 | I915_WRITE(TILECTL, s->tilectl); |
| 2281 | I915_WRITE(GTFIFOCTL, s->gt_fifoctl); |
| 2282 | /* |
| 2283 | * Preserve the GT allow wake and GFX force clock bits; they are not to |
| 2284 | * be restored, as they are used to control the s0ix suspend/resume |
| 2285 | * sequence by the caller. |
| 2286 | */ |
| 2287 | val = I915_READ(VLV_GTLC_WAKE_CTRL); |
| 2288 | val &= VLV_GTLC_ALLOWWAKEREQ; |
| 2289 | val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; |
| 2290 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
| 2291 | |
| 2292 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
| 2293 | val &= VLV_GFX_CLK_FORCE_ON_BIT; |
| 2294 | val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; |
| 2295 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); |
| 2296 | |
| 2297 | I915_WRITE(VLV_PMWGICZ, s->pmwgicz); |
| 2298 | |
| 2299 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
| 2300 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); |
| 2301 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); |
Jesse Barnes | 9c25210 | 2015-04-01 14:22:57 -0700 | [diff] [blame] | 2302 | I915_WRITE(VLV_PCBR, s->pcbr); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2303 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
| 2304 | } |
| 2305 | |
Tvrtko Ursulin | 5a31d30 | 2019-06-11 11:45:47 +0100 | [diff] [blame] | 2306 | static int vlv_wait_for_pw_status(struct drm_i915_private *i915, |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2307 | u32 mask, u32 val) |
| 2308 | { |
Ville Syrjälä | 39806c3f | 2019-02-04 23:16:44 +0200 | [diff] [blame] | 2309 | i915_reg_t reg = VLV_GTLC_PW_STATUS; |
| 2310 | u32 reg_value; |
| 2311 | int ret; |
| 2312 | |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2313 | /* The HW does not like us polling for PW_STATUS frequently, so |
| 2314 | * use the sleeping loop rather than risk the busy spin within |
| 2315 | * intel_wait_for_register(). |
| 2316 | * |
| 2317 | * Transitioning between RC6 states should be at most 2ms (see |
| 2318 | * valleyview_enable_rps) so use a 3ms timeout. |
| 2319 | */ |
Tvrtko Ursulin | 5a31d30 | 2019-06-11 11:45:47 +0100 | [diff] [blame] | 2320 | ret = wait_for(((reg_value = |
| 2321 | intel_uncore_read_notrace(&i915->uncore, reg)) & mask) |
| 2322 | == val, 3); |
Ville Syrjälä | 39806c3f | 2019-02-04 23:16:44 +0200 | [diff] [blame] | 2323 | |
| 2324 | /* just trace the final value */ |
| 2325 | trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); |
| 2326 | |
| 2327 | return ret; |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2328 | } |
| 2329 | |
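| | /* |
| | * Note on wait_for() used above: it is the driver's sleeping poll helper, |
| | * re-checking the condition with short sleeps in between and returning 0 |
| | * on success or -ETIMEDOUT. Roughly (a simplified sketch, not the exact |
| | * macro expansion): |
| | * |
| | *	unsigned long timeout = jiffies + msecs_to_jiffies(ms); |
| | * |
| | *	while (!(COND)) { |
| | *		if (time_after(jiffies, timeout)) |
| | *			return -ETIMEDOUT; |
| | *		usleep_range(10, 1000); |
| | *	} |
| | *	return 0; |
| | */ |
| | |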
Imre Deak | 650ad97 | 2014-04-18 16:35:02 +0300 | [diff] [blame] | 2330 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
| 2331 | { |
| 2332 | u32 val; |
| 2333 | int err; |
| 2334 | |
Imre Deak | 650ad97 | 2014-04-18 16:35:02 +0300 | [diff] [blame] | 2335 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
| 2336 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; |
| 2337 | if (force_on) |
| 2338 | val |= VLV_GFX_CLK_FORCE_ON_BIT; |
| 2339 | I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); |
| 2340 | |
| 2341 | if (!force_on) |
| 2342 | return 0; |
| 2343 | |
Daniele Ceraolo Spurio | 97a04e0 | 2019-03-25 14:49:39 -0700 | [diff] [blame] | 2344 | err = intel_wait_for_register(&dev_priv->uncore, |
Chris Wilson | c6ddc5f | 2016-06-30 15:32:46 +0100 | [diff] [blame] | 2345 | VLV_GTLC_SURVIVABILITY_REG, |
| 2346 | VLV_GFX_CLK_STATUS_BIT, |
| 2347 | VLV_GFX_CLK_STATUS_BIT, |
| 2348 | 20); |
Imre Deak | 650ad97 | 2014-04-18 16:35:02 +0300 | [diff] [blame] | 2349 | if (err) |
| 2350 | DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", |
| 2351 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); |
| 2352 | |
| 2353 | return err; |
Imre Deak | 650ad97 | 2014-04-18 16:35:02 +0300 | [diff] [blame] | 2354 | } |
| 2355 | |
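| | /* |
| | * vlv_force_gfx_clock() is used in matched pairs: vlv_suspend_complete() |
| | * and vlv_resume_prepare() below bracket their Gunit register accesses |
| | * and GT wake toggling with it, along the lines of: |
| | * |
| | *	vlv_force_gfx_clock(dev_priv, true); |
| | *	... save/restore Gunit s0ix state, vlv_allow_gt_wake(...) ... |
| | *	vlv_force_gfx_clock(dev_priv, false); |
| | */ |
| | |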
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2356 | static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
| 2357 | { |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2358 | u32 mask; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2359 | u32 val; |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2360 | int err; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2361 | |
| 2362 | val = I915_READ(VLV_GTLC_WAKE_CTRL); |
| 2363 | val &= ~VLV_GTLC_ALLOWWAKEREQ; |
| 2364 | if (allow) |
| 2365 | val |= VLV_GTLC_ALLOWWAKEREQ; |
| 2366 | I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
| 2367 | POSTING_READ(VLV_GTLC_WAKE_CTRL); |
| 2368 | |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2369 | mask = VLV_GTLC_ALLOWWAKEACK; |
| 2370 | val = allow ? mask : 0; |
| 2371 | |
| 2372 | err = vlv_wait_for_pw_status(dev_priv, mask, val); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2373 | if (err) |
| 2374 | DRM_ERROR("timeout disabling GT waking\n"); |
Chris Wilson | b273669 | 2016-06-30 15:32:47 +0100 | [diff] [blame] | 2375 | |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2376 | return err; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2377 | } |
| 2378 | |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2379 | static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
| 2380 | bool wait_for_on) |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2381 | { |
| 2382 | u32 mask; |
| 2383 | u32 val; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2384 | |
| 2385 | mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; |
| 2386 | val = wait_for_on ? mask : 0; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2387 | |
| 2388 | /* |
| 2389 | * RC6 transitioning can be delayed up to 2 msec (see |
| 2390 | * valleyview_enable_rps), so use 3 msec for safety. |
Chris Wilson | e01569a | 2018-04-09 10:49:05 +0100 | [diff] [blame] | 2391 | * |
| 2392 | * This can fail to turn off the rc6 if the GPU is stuck after a failed |
| 2393 | * reset and we are trying to force the machine to sleep. |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2394 | */ |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2395 | if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
Chris Wilson | e01569a | 2018-04-09 10:49:05 +0100 | [diff] [blame] | 2396 | DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n", |
| 2397 | onoff(wait_for_on)); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2398 | } |
| 2399 | |
| 2400 | static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) |
| 2401 | { |
| 2402 | if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) |
| 2403 | return; |
| 2404 | |
Daniel Vetter | 6fa283b | 2016-01-19 21:00:56 +0100 | [diff] [blame] | 2405 | DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2406 | I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
| 2407 | } |
| 2408 | |
Sagar Kamble | ebc3282 | 2014-08-13 23:07:05 +0530 | [diff] [blame] | 2409 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2410 | { |
| 2411 | u32 mask; |
| 2412 | int err; |
| 2413 | |
| 2414 | /* |
| 2415 | * Bspec defines the following GT well-on flags as debug only, so |
| 2416 | * don't treat them as hard failures. |
| 2417 | */ |
Chris Wilson | 3dd14c0 | 2017-04-21 14:58:15 +0100 | [diff] [blame] | 2418 | vlv_wait_for_gt_wells(dev_priv, false); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2419 | |
| 2420 | mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; |
| 2421 | WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); |
| 2422 | |
| 2423 | vlv_check_no_gt_access(dev_priv); |
| 2424 | |
| 2425 | err = vlv_force_gfx_clock(dev_priv, true); |
| 2426 | if (err) |
| 2427 | goto err1; |
| 2428 | |
| 2429 | err = vlv_allow_gt_wake(dev_priv, false); |
| 2430 | if (err) |
| 2431 | goto err2; |
Deepak S | 9871116 | 2014-12-12 14:18:16 +0530 | [diff] [blame] | 2432 | |
Joonas Lahtinen | 2d1fe07 | 2016-04-07 11:08:05 +0300 | [diff] [blame] | 2433 | if (!IS_CHERRYVIEW(dev_priv)) |
Deepak S | 9871116 | 2014-12-12 14:18:16 +0530 | [diff] [blame] | 2434 | vlv_save_gunit_s0ix_state(dev_priv); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2435 | |
| 2436 | err = vlv_force_gfx_clock(dev_priv, false); |
| 2437 | if (err) |
| 2438 | goto err2; |
| 2439 | |
| 2440 | return 0; |
| 2441 | |
| 2442 | err2: |
| 2443 | /* For safety always re-enable waking and disable gfx clock forcing */ |
| 2444 | vlv_allow_gt_wake(dev_priv, true); |
| 2445 | err1: |
| 2446 | vlv_force_gfx_clock(dev_priv, false); |
| 2447 | |
| 2448 | return err; |
| 2449 | } |
| 2450 | |
Sagar Kamble | 016970b | 2014-08-13 23:07:06 +0530 | [diff] [blame] | 2451 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
| 2452 | bool rpm_resume) |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2453 | { |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2454 | int err; |
| 2455 | int ret; |
| 2456 | |
| 2457 | /* |
| 2458 | * If any of the steps fail, just try to continue; that's the best we |
| 2459 | * can do at this point. Return the first error code (which will also |
| 2460 | * leave RPM permanently disabled). |
| 2461 | */ |
| 2462 | ret = vlv_force_gfx_clock(dev_priv, true); |
| 2463 | |
Joonas Lahtinen | 2d1fe07 | 2016-04-07 11:08:05 +0300 | [diff] [blame] | 2464 | if (!IS_CHERRYVIEW(dev_priv)) |
Deepak S | 9871116 | 2014-12-12 14:18:16 +0530 | [diff] [blame] | 2465 | vlv_restore_gunit_s0ix_state(dev_priv); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2466 | |
| 2467 | err = vlv_allow_gt_wake(dev_priv, true); |
| 2468 | if (!ret) |
| 2469 | ret = err; |
| 2470 | |
| 2471 | err = vlv_force_gfx_clock(dev_priv, false); |
| 2472 | if (!ret) |
| 2473 | ret = err; |
| 2474 | |
| 2475 | vlv_check_no_gt_access(dev_priv); |
| 2476 | |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 2477 | if (rpm_resume) |
Ville Syrjälä | 46f16e6 | 2016-10-31 22:37:22 +0200 | [diff] [blame] | 2478 | intel_init_clock_gating(dev_priv); |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2479 | |
| 2480 | return ret; |
| 2481 | } |
| 2482 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2483 | static int intel_runtime_suspend(struct device *kdev) |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2484 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2485 | struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
Daniele Ceraolo Spurio | 1bf676c | 2019-06-13 16:21:52 -0700 | [diff] [blame] | 2486 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 2487 | int ret = 0; |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2488 | |
Chris Wilson | fb6db0f | 2017-12-01 11:30:30 +0000 | [diff] [blame] | 2489 | if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) |
Imre Deak | c6df39b | 2014-04-14 20:24:29 +0300 | [diff] [blame] | 2490 | return -ENODEV; |
| 2491 | |
Tvrtko Ursulin | 6772ffe | 2016-10-13 11:02:55 +0100 | [diff] [blame] | 2492 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
Imre Deak | 604effb | 2014-08-26 13:26:56 +0300 | [diff] [blame] | 2493 | return -ENODEV; |
| 2494 | |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2495 | DRM_DEBUG_KMS("Suspending device\n"); |
| 2496 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2497 | disable_rpm_wakeref_asserts(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 2498 | |
Imre Deak | d610297 | 2014-05-07 19:57:49 +0300 | [diff] [blame] | 2499 | /* |
| 2500 | * We are safe here against re-faults, since the fault handler takes |
| 2501 | * an RPM reference. |
| 2502 | */ |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 2503 | i915_gem_runtime_suspend(dev_priv); |
Imre Deak | d610297 | 2014-05-07 19:57:49 +0300 | [diff] [blame] | 2504 | |
Daniele Ceraolo Spurio | 9dfe345 | 2019-07-31 17:57:09 -0700 | [diff] [blame] | 2505 | intel_gt_runtime_suspend(&dev_priv->gt); |
Alex Dai | a1c4199 | 2015-09-30 09:46:37 -0700 | [diff] [blame] | 2506 | |
Imre Deak | 2eb5252 | 2014-11-19 15:30:05 +0200 | [diff] [blame] | 2507 | intel_runtime_pm_disable_interrupts(dev_priv); |
Imre Deak | b5478bc | 2014-04-14 20:24:37 +0300 | [diff] [blame] | 2508 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 2509 | intel_uncore_suspend(&dev_priv->uncore); |
Hans de Goede | 01c799c | 2017-11-14 14:55:18 +0100 | [diff] [blame] | 2510 | |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 2511 | intel_display_power_suspend(dev_priv); |
| 2512 | |
| 2513 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
Imre Deak | 507e126 | 2016-04-20 20:27:54 +0300 | [diff] [blame] | 2514 | ret = vlv_suspend_complete(dev_priv); |
Imre Deak | 507e126 | 2016-04-20 20:27:54 +0300 | [diff] [blame] | 2515 | |
Imre Deak | 0ab9cfe | 2014-04-15 16:39:45 +0300 | [diff] [blame] | 2516 | if (ret) { |
| 2517 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 2518 | intel_uncore_runtime_resume(&dev_priv->uncore); |
Hans de Goede | 01c799c | 2017-11-14 14:55:18 +0100 | [diff] [blame] | 2519 | |
Daniel Vetter | b963291 | 2014-09-30 10:56:44 +0200 | [diff] [blame] | 2520 | intel_runtime_pm_enable_interrupts(dev_priv); |
Imre Deak | 0ab9cfe | 2014-04-15 16:39:45 +0300 | [diff] [blame] | 2521 | |
Daniele Ceraolo Spurio | 9dfe345 | 2019-07-31 17:57:09 -0700 | [diff] [blame] | 2522 | intel_gt_runtime_resume(&dev_priv->gt); |
Sagar Arun Kamble | 1ed21cb | 2018-01-24 21:16:57 +0530 | [diff] [blame] | 2523 | |
Sagar Arun Kamble | 1ed21cb | 2018-01-24 21:16:57 +0530 | [diff] [blame] | 2524 | i915_gem_restore_fences(dev_priv); |
| 2525 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2526 | enable_rpm_wakeref_asserts(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 2527 | |
Imre Deak | 0ab9cfe | 2014-04-15 16:39:45 +0300 | [diff] [blame] | 2528 | return ret; |
| 2529 | } |
Paulo Zanoni | a8a8bd5 | 2014-03-07 20:08:05 -0300 | [diff] [blame] | 2530 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2531 | enable_rpm_wakeref_asserts(rpm); |
Janusz Krzysztofik | 3b58a94 | 2019-07-12 13:24:28 +0200 | [diff] [blame] | 2532 | intel_runtime_pm_driver_release(rpm); |
Mika Kuoppala | 55ec45c | 2015-12-15 16:25:08 +0200 | [diff] [blame] | 2533 | |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 2534 | if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) |
Mika Kuoppala | 55ec45c | 2015-12-15 16:25:08 +0200 | [diff] [blame] | 2535 | DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
| 2536 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2537 | rpm->suspended = true; |
Kristen Carlson Accardi | 1fb2362 | 2014-01-14 15:36:15 -0800 | [diff] [blame] | 2538 | |
| 2539 | /* |
Paulo Zanoni | c8a0bd4 | 2014-08-21 17:09:38 -0300 | [diff] [blame] | 2540 | * FIXME: We really should find a document that references the arguments |
| 2541 | * used below! |
Kristen Carlson Accardi | 1fb2362 | 2014-01-14 15:36:15 -0800 | [diff] [blame] | 2542 | */ |
Chris Wilson | 6f9f4b7 | 2016-05-23 15:08:09 +0100 | [diff] [blame] | 2543 | if (IS_BROADWELL(dev_priv)) { |
Paulo Zanoni | d37ae19 | 2015-07-30 18:20:29 -0300 | [diff] [blame] | 2544 | /* |
| 2545 | * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop |
| 2546 | * being detected, and the call we do at intel_runtime_resume() |
| 2547 | * won't be able to restore them. Since PCI_D3hot matches the |
| 2548 | * actual specification and appears to be working, use it. |
| 2549 | */ |
Chris Wilson | 6f9f4b7 | 2016-05-23 15:08:09 +0100 | [diff] [blame] | 2550 | intel_opregion_notify_adapter(dev_priv, PCI_D3hot); |
Paulo Zanoni | d37ae19 | 2015-07-30 18:20:29 -0300 | [diff] [blame] | 2551 | } else { |
Paulo Zanoni | c8a0bd4 | 2014-08-21 17:09:38 -0300 | [diff] [blame] | 2552 | /* |
| 2553 | * Current versions of firmware which depend on this opregion |
| 2554 | * notification have repurposed the D1 definition to mean |
| 2555 | * "runtime suspended" vs. what you would normally expect (D3), |
| 2556 | * in order to distinguish it from notifications that might be sent |
| 2557 | * via the suspend path. |
| 2558 | */ |
Chris Wilson | 6f9f4b7 | 2016-05-23 15:08:09 +0100 | [diff] [blame] | 2559 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
Paulo Zanoni | c8a0bd4 | 2014-08-21 17:09:38 -0300 | [diff] [blame] | 2560 | } |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2561 | |
Daniele Ceraolo Spurio | f568eee | 2019-03-19 11:35:35 -0700 | [diff] [blame] | 2562 | assert_forcewakes_inactive(&dev_priv->uncore); |
Chris Wilson | dc9fb09 | 2015-01-16 11:34:34 +0200 | [diff] [blame] | 2563 | |
Ander Conselvan de Oliveira | 21d6e0b | 2017-01-20 16:28:43 +0200 | [diff] [blame] | 2564 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
Lyude | 19625e8 | 2016-06-21 17:03:44 -0400 | [diff] [blame] | 2565 | intel_hpd_poll_init(dev_priv); |
| 2566 | |
Paulo Zanoni | a8a8bd5 | 2014-03-07 20:08:05 -0300 | [diff] [blame] | 2567 | DRM_DEBUG_KMS("Device suspended\n"); |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2568 | return 0; |
| 2569 | } |
| 2570 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2571 | static int intel_runtime_resume(struct device *kdev) |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2572 | { |
Chris Wilson | 361f9dc | 2019-08-06 08:42:19 +0100 | [diff] [blame] | 2573 | struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
Daniele Ceraolo Spurio | 1bf676c | 2019-06-13 16:21:52 -0700 | [diff] [blame] | 2574 | struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
Paulo Zanoni | 1a5df18 | 2014-10-27 17:54:32 -0200 | [diff] [blame] | 2575 | int ret = 0; |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2576 | |
Tvrtko Ursulin | 6772ffe | 2016-10-13 11:02:55 +0100 | [diff] [blame] | 2577 | if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
Imre Deak | 604effb | 2014-08-26 13:26:56 +0300 | [diff] [blame] | 2578 | return -ENODEV; |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2579 | |
| 2580 | DRM_DEBUG_KMS("Resuming device\n"); |
| 2581 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2582 | WARN_ON_ONCE(atomic_read(&rpm->wakeref_count)); |
| 2583 | disable_rpm_wakeref_asserts(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 2584 | |
Chris Wilson | 6f9f4b7 | 2016-05-23 15:08:09 +0100 | [diff] [blame] | 2585 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2586 | rpm->suspended = false; |
Daniele Ceraolo Spurio | 2cf7bf6 | 2019-03-25 14:49:34 -0700 | [diff] [blame] | 2587 | if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) |
Mika Kuoppala | 55ec45c | 2015-12-15 16:25:08 +0200 | [diff] [blame] | 2588 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2589 | |
Rodrigo Vivi | 071b68c | 2019-08-06 15:22:08 +0300 | [diff] [blame] | 2590 | intel_display_power_resume(dev_priv); |
| 2591 | |
| 2592 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
Paulo Zanoni | 1a5df18 | 2014-10-27 17:54:32 -0200 | [diff] [blame] | 2593 | ret = vlv_resume_prepare(dev_priv, true); |
| 2594 | |
Daniele Ceraolo Spurio | f7de502 | 2019-03-19 11:35:37 -0700 | [diff] [blame] | 2595 | intel_uncore_runtime_resume(&dev_priv->uncore); |
Hans de Goede | bedf4d7 | 2017-11-14 14:55:17 +0100 | [diff] [blame] | 2596 | |
Sagar Arun Kamble | 1ed21cb | 2018-01-24 21:16:57 +0530 | [diff] [blame] | 2597 | intel_runtime_pm_enable_interrupts(dev_priv); |
| 2598 | |
Imre Deak | 0ab9cfe | 2014-04-15 16:39:45 +0300 | [diff] [blame] | 2599 | /* |
| 2600 | * No point in rolling back things in case of an error, as the best |
| 2601 | * we can do is to hope that things will still work (and disable RPM). |
| 2602 | */ |
Daniele Ceraolo Spurio | 9dfe345 | 2019-07-31 17:57:09 -0700 | [diff] [blame] | 2603 | intel_gt_runtime_resume(&dev_priv->gt); |
Chris Wilson | 83bf6d5 | 2017-02-03 12:57:17 +0000 | [diff] [blame] | 2604 | i915_gem_restore_fences(dev_priv); |
Imre Deak | 92b806d | 2014-04-14 20:24:39 +0300 | [diff] [blame] | 2605 | |
Ville Syrjälä | 08d8a23 | 2015-08-27 23:56:08 +0300 | [diff] [blame] | 2606 | /* |
| 2607 | * On VLV/CHV display interrupts are part of the display |
| 2608 | * power well, so hpd is reinitialized from there. For |
| 2609 | * everyone else do it here. |
| 2610 | */ |
Wayne Boyer | 666a453 | 2015-12-09 12:29:35 -0800 | [diff] [blame] | 2611 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
Ville Syrjälä | 08d8a23 | 2015-08-27 23:56:08 +0300 | [diff] [blame] | 2612 | intel_hpd_init(dev_priv); |
| 2613 | |
Kumar, Mahesh | 2503a0f | 2017-08-17 19:15:28 +0530 | [diff] [blame] | 2614 | intel_enable_ipc(dev_priv); |
| 2615 | |
Daniele Ceraolo Spurio | 9102650 | 2019-06-13 16:21:51 -0700 | [diff] [blame] | 2616 | enable_rpm_wakeref_asserts(rpm); |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 2617 | |
Imre Deak | 0ab9cfe | 2014-04-15 16:39:45 +0300 | [diff] [blame] | 2618 | if (ret) |
| 2619 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); |
| 2620 | else |
| 2621 | DRM_DEBUG_KMS("Device resumed\n"); |
| 2622 | |
| 2623 | return ret; |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2624 | } |
| 2625 | |
Chris Wilson | 42f5551 | 2016-06-24 14:00:26 +0100 | [diff] [blame] | 2626 | const struct dev_pm_ops i915_pm_ops = { |
Imre Deak | 5545dbb | 2014-10-23 19:23:28 +0300 | [diff] [blame] | 2627 | /* |
| 2628 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, |
| 2629 | * PMSG_RESUME] |
| 2630 | */ |
Chris Wilson | 73b66f8 | 2018-05-25 10:26:29 +0100 | [diff] [blame] | 2631 | .prepare = i915_pm_prepare, |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 2632 | .suspend = i915_pm_suspend, |
Imre Deak | 76c4b25 | 2014-04-01 19:55:22 +0300 | [diff] [blame] | 2633 | .suspend_late = i915_pm_suspend_late, |
| 2634 | .resume_early = i915_pm_resume_early, |
Akshay Joshi | 0206e35 | 2011-08-16 15:34:10 -0400 | [diff] [blame] | 2635 | .resume = i915_pm_resume, |
Imre Deak | 5545dbb | 2014-10-23 19:23:28 +0300 | [diff] [blame] | 2636 | |
| 2637 | /* |
| 2638 | * S4 event handlers |
| 2639 | * @freeze, @freeze_late : called (1) before creating the |
| 2640 | * hibernation image [PMSG_FREEZE] and |
| 2641 | * (2) after rebooting, before restoring |
| 2642 | * the image [PMSG_QUIESCE] |
| 2643 | * @thaw, @thaw_early : called (1) after creating the hibernation |
| 2644 | * image, before writing it [PMSG_THAW] |
| 2645 | * and (2) after failing to create or |
| 2646 | * restore the image [PMSG_RECOVER] |
| 2647 | * @poweroff, @poweroff_late: called after writing the hibernation |
| 2648 | * image, before rebooting [PMSG_HIBERNATE] |
| 2649 | * @restore, @restore_early : called after rebooting and restoring the |
| 2650 | * hibernation image [PMSG_RESTORE] |
| 2651 | */ |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2652 | .freeze = i915_pm_freeze, |
| 2653 | .freeze_late = i915_pm_freeze_late, |
| 2654 | .thaw_early = i915_pm_thaw_early, |
| 2655 | .thaw = i915_pm_thaw, |
Imre Deak | 36d61e6 | 2014-10-23 19:23:24 +0300 | [diff] [blame] | 2656 | .poweroff = i915_pm_suspend, |
Imre Deak | ab3be73 | 2015-03-02 13:04:41 +0200 | [diff] [blame] | 2657 | .poweroff_late = i915_pm_poweroff_late, |
Chris Wilson | 1f19ac2 | 2016-05-14 07:26:32 +0100 | [diff] [blame] | 2658 | .restore_early = i915_pm_restore_early, |
| 2659 | .restore = i915_pm_restore, |
Imre Deak | 5545dbb | 2014-10-23 19:23:28 +0300 | [diff] [blame] | 2660 | |
| 2661 | /* S0ix (via runtime suspend) event handlers */ |
Paulo Zanoni | 97bea20 | 2014-03-07 20:12:33 -0300 | [diff] [blame] | 2662 | .runtime_suspend = intel_runtime_suspend, |
| 2663 | .runtime_resume = intel_runtime_resume, |
Zhenyu Wang | cbda12d | 2009-12-16 13:36:10 +0800 | [diff] [blame] | 2664 | }; |
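
/*
 * Minimal sketch of how a dev_pm_ops table like the one above is consumed:
 * the PM core reaches it through the owning PCI driver's .driver.pm pointer
 * (i915 does this wiring from its PCI glue code). The id_table, probe and
 * remove fields below are hypothetical placeholders, not i915's real hooks.
 */
static struct pci_driver example_pci_driver = {
	.name       = "example",
	.id_table   = example_pci_ids,	/* hypothetical device ID table */
	.probe      = example_probe,	/* hypothetical probe callback */
	.remove     = example_remove,	/* hypothetical remove callback */
	.driver.pm  = &i915_pm_ops,	/* S3/S4/runtime handlers above */
};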
| 2665 | |
Laurent Pinchart | 78b6855 | 2012-05-17 13:27:22 +0200 | [diff] [blame] | 2666 | static const struct vm_operations_struct i915_gem_vm_ops = { |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2667 | .fault = i915_gem_fault, |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 2668 | .open = drm_gem_vm_open, |
| 2669 | .close = drm_gem_vm_close, |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2670 | }; |
| 2671 | |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 2672 | static const struct file_operations i915_driver_fops = { |
| 2673 | .owner = THIS_MODULE, |
| 2674 | .open = drm_open, |
| 2675 | .release = drm_release, |
| 2676 | .unlocked_ioctl = drm_ioctl, |
| 2677 | .mmap = drm_gem_mmap, |
| 2678 | .poll = drm_poll, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 2679 | .read = drm_read, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 2680 | .compat_ioctl = i915_compat_ioctl, |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 2681 | .llseek = noop_llseek, |
| 2682 | }; |
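
/*
 * Minimal userspace-side sketch of the .mmap = drm_gem_mmap path above,
 * assuming a dumb-buffer handle was created earlier (e.g. via
 * DRM_IOCTL_MODE_CREATE_DUMB). Built against libdrm; error handling trimmed.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm.h>

static void *example_map_dumb(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	/* Ask the driver for the fake mmap offset of this GEM handle. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* mmap() on the DRM fd routes back through drm_gem_mmap above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		    (off_t)map.offset);
}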
| 2683 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2684 | static int |
| 2685 | i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, |
| 2686 | struct drm_file *file) |
| 2687 | { |
| 2688 | return -ENODEV; |
| 2689 | } |
| 2690 | |
| 2691 | static const struct drm_ioctl_desc i915_ioctls[] = { |
| 2692 | DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2693 | DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), |
| 2694 | DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), |
| 2695 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), |
| 2696 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), |
| 2697 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), |
Christian König | b972fff | 2019-04-17 13:25:24 +0200 | [diff] [blame] | 2698 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2699 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2700 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
| 2701 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
| 2702 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2703 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), |
| 2704 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2705 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2706 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), |
| 2707 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), |
| 2708 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2709 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
Ville Syrjälä | 6a20fe7 | 2018-02-07 18:48:41 +0200 | [diff] [blame] | 2710 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH), |
Christian König | b972fff | 2019-04-17 13:25:24 +0200 | [diff] [blame] | 2711 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2712 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
| 2713 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
Christian König | b972fff | 2019-04-17 13:25:24 +0200 | [diff] [blame] | 2714 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2715 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), |
| 2716 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), |
Christian König | b972fff | 2019-04-17 13:25:24 +0200 | [diff] [blame] | 2717 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2718 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2719 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2720 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), |
| 2721 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), |
| 2722 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), |
| 2723 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), |
| 2724 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), |
| 2725 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), |
| 2726 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 111dbca | 2017-01-10 12:10:44 +0000 | [diff] [blame] | 2727 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), |
| 2728 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2729 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), |
Ville Syrjälä | 6a20fe7 | 2018-02-07 18:48:41 +0200 | [diff] [blame] | 2730 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2731 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), |
Daniel Vetter | 0cd54b0 | 2018-04-20 08:51:57 +0200 | [diff] [blame] | 2732 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), |
| 2733 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), |
| 2734 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), |
| 2735 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), |
Christian König | b972fff | 2019-04-17 13:25:24 +0200 | [diff] [blame] | 2736 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | b917154 | 2019-03-22 09:23:24 +0000 | [diff] [blame] | 2737 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2738 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), |
| 2739 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), |
| 2740 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), |
| 2741 | DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), |
| 2742 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), |
| 2743 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 2744 | DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), |
Lionel Landwerlin | f89823c | 2017-08-03 18:05:50 +0100 | [diff] [blame] | 2745 | DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
| 2746 | DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
Lionel Landwerlin | a446ae2 | 2018-03-06 12:28:56 +0000 | [diff] [blame] | 2747 | DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
Chris Wilson | 7f3f317a | 2019-05-21 22:11:25 +0100 | [diff] [blame] | 2748 | DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), |
| 2749 | DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2750 | }; |
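
/*
 * Minimal userspace-side sketch: the entries in the table above are reached
 * through ioctl() on a card or render node. For example, I915_GETPARAM via
 * libdrm's drmIoctl() wrapper (which retries on EINTR); the fd and the chosen
 * parameter are illustrative.
 */
#include <xf86drm.h>
#include <i915_drm.h>	/* libdrm copy of the i915 uapi header */

static int example_query_chipset_id(int fd, int *id)
{
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = id,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}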
| 2751 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2752 | static struct drm_driver driver = { |
Michael Witten | 0c54781 | 2011-08-25 17:55:54 +0000 | [diff] [blame] | 2753 | /* Don't use MTRRs here; the Xserver or userspace app should |
| 2754 | * deal with them for Intel hardware. |
Dave Airlie | 792d2b9 | 2005-11-11 23:30:27 +1100 | [diff] [blame] | 2755 | */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2756 | .driver_features = |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 2757 | DRIVER_GEM | DRIVER_PRIME | |
Jason Ekstrand | cf6e7ba | 2017-08-15 15:57:33 +0100 | [diff] [blame] | 2758 | DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ, |
Chris Wilson | cad3688 | 2017-02-10 16:35:21 +0000 | [diff] [blame] | 2759 | .release = i915_driver_release, |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2760 | .open = i915_driver_open, |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 2761 | .lastclose = i915_driver_lastclose, |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2762 | .postclose = i915_driver_postclose, |
Rafael J. Wysocki | d8e2920 | 2010-01-09 00:45:33 +0100 | [diff] [blame] | 2763 | |
Chris Wilson | b1f788c | 2016-08-04 07:52:45 +0100 | [diff] [blame] | 2764 | .gem_close_object = i915_gem_close_object, |
Chris Wilson | f0cd518 | 2016-10-28 13:58:43 +0100 | [diff] [blame] | 2765 | .gem_free_object_unlocked = i915_gem_free_object, |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2766 | .gem_vm_ops = &i915_gem_vm_ops, |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 2767 | |
| 2768 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
| 2769 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
| 2770 | .gem_prime_export = i915_gem_prime_export, |
| 2771 | .gem_prime_import = i915_gem_prime_import, |
| 2772 | |
Ville Syrjälä | 7d23e59 | 2019-06-19 20:08:42 +0300 | [diff] [blame] | 2773 | .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, |
| 2774 | .get_scanout_position = i915_get_crtc_scanoutpos, |
| 2775 | |
Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 2776 | .dumb_create = i915_gem_dumb_create, |
Dave Airlie | da6b51d | 2014-12-24 13:11:17 +1000 | [diff] [blame] | 2777 | .dumb_map_offset = i915_gem_mmap_gtt, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2778 | .ioctls = i915_ioctls, |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2779 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
Arjan van de Ven | e08e96d | 2011-10-31 07:28:57 -0700 | [diff] [blame] | 2780 | .fops = &i915_driver_fops, |
Dave Airlie | 22eae94 | 2005-11-10 22:16:34 +1100 | [diff] [blame] | 2781 | .name = DRIVER_NAME, |
| 2782 | .desc = DRIVER_DESC, |
| 2783 | .date = DRIVER_DATE, |
| 2784 | .major = DRIVER_MAJOR, |
| 2785 | .minor = DRIVER_MINOR, |
| 2786 | .patchlevel = DRIVER_PATCHLEVEL, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2787 | }; |
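
/*
 * Minimal sketch of the generic bring-up for a drm_driver like the one above:
 * allocate a drm_device bound to these ops and register it so the /dev/dri
 * nodes appear. i915's real PCI probe path does considerably more around
 * this; the function below only illustrates the core drm_drv.h calls.
 */
static int example_register(struct pci_dev *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		drm_dev_put(drm);

	return ret;
}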
Chris Wilson | 66d9cb5 | 2017-02-13 17:15:17 +0000 | [diff] [blame] | 2788 | |
| 2789 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 2790 | #include "selftests/mock_drm.c" |
| 2791 | #endif |