/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20171222"
#define DRIVER_TIMESTAMP	1513971710

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks modparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915_modparams.verbose_state_checks, format))	\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
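
/*
 * Illustrative usage (a sketch only; the names below are hypothetical,
 * not taken from this header):
 *
 *	if (I915_STATE_WARN(crtc->active && !crtc->enabled,
 *			    "crtc active but not enabled\n"))
 *		return;
 *
 * Like WARN(), the macro evaluates to the boolean value of the
 * condition, so callers can gate error handling on it directly.
 */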

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)
#else
#define i915_inject_load_failure() false
#endif

typedef struct {
	uint32_t val;
} uint_fixed_16_16_t;

#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	if (val.val == 0)
		return true;
	return false;
}

static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);

	fp.val = val << 16;
	return fp;
}

static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min;

	min.val = min(min1.val, min2.val);
	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max;

	max.val = max(max1.val, max2.val);
	return max;
}

static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U32_MAX);
	fp.val = (uint32_t) val;
	return fp;
}

static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
					    uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
	WARN_ON(intermediate_val > U32_MAX);
	return (uint32_t) intermediate_val;
}

static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val.val * mul.val;
	intermediate_val = intermediate_val >> 16;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
	return clamp_u64_to_fixed16(interm_val);
}

static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
	WARN_ON(interm_val > U32_MAX);
	return (uint32_t) interm_val;
}

static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
						 uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	uint64_t interm_sum;

	interm_sum = (uint64_t) add1.val + add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}

static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
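
/*
 * Worked example for the 16.16 helpers above (illustrative only): the
 * integer part lives in the high 16 bits and the fraction in the low
 * 16 bits, so 1.5 is encoded as 0x18000.
 *
 *	uint_fixed_16_16_t v = div_fixed16(3, 2);	// v.val == 0x18000
 *	fixed16_to_u32(v);		// 1 (truncates toward zero)
 *	fixed16_to_u32_round_up(v);	// 2
 *	mul_fixed16(v, v).val;		// 0x24000, i.e. 2.25
 */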

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
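
/*
 * Typical use of the iterator above (a sketch): walk every real pin,
 * skipping HPD_NONE, e.g. when re-enabling hotplug detection state
 * (see struct i915_hotplug below):
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
 */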

#define HPD_STORM_DEFAULT_THRESHOLD 5

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP work could block the workqueue on a mode config mutex
	 * that userspace may have taken. However, userspace is waiting
	 * on the DP workqueue to run, which is blocked behind the
	 * non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		atomic_t boosts;
	} rps_client;

	unsigned int bsd_engine;

/* A client can have a maximum of 3 contexts banned before it is barred
 * from creating new contexts. As one context ban needs 4 consecutive
 * hangs, and more if there is progress in between, this is a last-resort
 * stop-gap measure to limit a badly behaving client's access to the GPU.
 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
	atomic_t context_bans;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
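
/*
 * Example (illustrative): CSR_VERSION(1, 27) packs to 0x0001001b, and
 * CSR_VERSION_MAJOR()/CSR_VERSION_MINOR() recover 1 and 27 from it.
 */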

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

struct intel_display_error_state;

struct i915_gpu_state {
	struct kref ref;
	ktime_t time;
	ktime_t boottime;
	ktime_t uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct i915_params params;

	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int priority;
			int ban_score;
			int active;
			int guilty;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int priority;
			int ban_score;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;

		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
	bool y_cord_support;
	bool colorimetry_support;
	bool alpm;

	void (*enable_source)(struct intel_dp *,
			      const struct intel_crtc_state *);
	void (*disable_source)(struct intel_dp *,
			       const struct intel_crtc_state *);
	void (*enable_sink)(struct intel_dp *);
	void (*activate)(struct intel_dp *);
	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kaby Lake PCH */
	PCH_CNP,	/* Cannon Lake PCH */
	PCH_ICP,	/* Ice Lake PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
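
/*
 * Sketch of how the quirk bits are consumed (illustrative only; this
 * assumes the per-device quirks bitmask declared further down in this
 * header, populated at load time):
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		val = panel->backlight.max - val;
 */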

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

Ben Widawsky | b39fb29 | 2014-03-19 18:31:11 -0700 | [diff] [blame] | 910 | /* Frequencies are stored in potentially platform dependent multiples. |
| 911 | * In other words, *_freq needs to be multiplied by X to be interesting. |
| 912 | * Soft limits are those which are used for the dynamic reclocking done |
| 913 | * by the driver (raise frequencies under heavy loads, and lower for |
| 914 | * lighter loads). Hard limits are those imposed by the hardware. |
| 915 | * |
| 916 | * A distinction is made for overclocking, which is never enabled by |
| 917 | * default, and is considered to be above the hard limit if it's |
| 918 | * possible at all. |
| 919 | */ |
| 920 | u8 cur_freq; /* Current frequency (cached, may not == HW) */ |
| 921 | u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ |
| 922 | u8 max_freq_softlimit; /* Max frequency permitted by the driver */ |
| 923 | u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ |
| 924 | u8 min_freq; /* AKA RPn. Minimum frequency */ |
Chris Wilson | 29ecd78d | 2016-07-13 09:10:35 +0100 | [diff] [blame] | 925 | u8 boost_freq; /* Frequency to request when wait boosting */ |
Chris Wilson | aed242f | 2015-03-18 09:48:21 +0000 | [diff] [blame] | 926 | u8 idle_freq; /* Frequency to request when we are idle */ |
Ben Widawsky | b39fb29 | 2014-03-19 18:31:11 -0700 | [diff] [blame] | 927 | u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ |
| 928 | u8 rp1_freq; /* "less than" RP0 power/freqency */ |
| 929 | u8 rp0_freq; /* Non-overclocked max frequency. */ |
Ville Syrjälä | c30fec6 | 2016-03-04 21:43:02 +0200 | [diff] [blame] | 930 | u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ |
Jesse Barnes | 1a01ab3 | 2012-11-02 11:14:00 -0700 | [diff] [blame] | 931 | |
Chris Wilson | 8fb5519 | 2015-04-07 16:20:28 +0100 | [diff] [blame] | 932 | u8 up_threshold; /* Current %busy required to upclock */ |
| 933 | u8 down_threshold; /* Current %busy required to downclock */ |
| 934 | |
Chris Wilson | dd75fdc | 2013-09-25 17:34:57 +0100 | [diff] [blame] | 935 | int last_adj; |
| 936 | enum { LOW_POWER, BETWEEN, HIGH_POWER } power; |
| 937 | |
Chris Wilson | c0951f0 | 2013-10-10 21:58:50 +0100 | [diff] [blame] | 938 | bool enabled; |
Chris Wilson | 7b92c1b | 2017-06-28 13:35:48 +0100 | [diff] [blame] | 939 | atomic_t num_waiters; |
| 940 | atomic_t boosts; |
Jesse Barnes | 4fc688c | 2012-11-02 11:14:01 -0700 | [diff] [blame] | 941 | |
Chris Wilson | bf225f2 | 2014-07-10 20:31:18 +0100 | [diff] [blame] | 942 | /* manual wa residency calculations */ |
Chris Wilson | e0e8c7c | 2017-03-09 21:12:30 +0000 | [diff] [blame] | 943 | struct intel_rps_ei ei; |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 944 | }; |
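| | |
| | /* |
| |  * Hedged illustration, not driver code: how the opaque *_freq values |
| |  * above are usually decoded into MHz. On gen6-8 the unit is 50 MHz; |
| |  * on gen9+ it is 50/3 MHz (cf. intel_gpu_freq() in intel_pm.c, which |
| |  * additionally handles the vlv/chv GPLL-based conversion skipped |
| |  * here). The constants 50 and 3 correspond to GT_FREQUENCY_MULTIPLIER |
| |  * and GEN9_FREQ_SCALER elsewhere in the driver. |
| |  */ |
| | static inline int example_rps_freq_to_mhz(unsigned int gen, u8 freq) |
| | { |
| | 	if (gen >= 9) |
| | 		return DIV_ROUND_CLOSEST(freq * 50, 3); /* 50/3 MHz units */ |
| | 	return freq * 50; /* 50 MHz units */ |
| | } |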
| 945 | |
Sagar Arun Kamble | 37d933f | 2017-10-10 22:30:10 +0100 | [diff] [blame] | 946 | struct intel_rc6 { |
| 947 | bool enabled; |
| 948 | }; |
| 949 | |
| 950 | struct intel_llc_pstate { |
| 951 | bool enabled; |
| 952 | }; |
| 953 | |
Sagar Arun Kamble | 562d9ba | 2017-10-10 22:30:06 +0100 | [diff] [blame] | 954 | struct intel_gen6_power_mgmt { |
| 955 | struct intel_rps rps; |
Sagar Arun Kamble | 37d933f | 2017-10-10 22:30:10 +0100 | [diff] [blame] | 956 | struct intel_rc6 rc6; |
| 957 | struct intel_llc_pstate llc_pstate; |
Sagar Arun Kamble | 562d9ba | 2017-10-10 22:30:06 +0100 | [diff] [blame] | 958 | }; |
| 959 | |
Daniel Vetter | 1a240d4 | 2012-11-29 22:18:51 +0100 | [diff] [blame] | 960 | /* defined in intel_pm.c */ |
| 961 | extern spinlock_t mchdev_lock; |
| 962 | |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 963 | struct intel_ilk_power_mgmt { |
| 964 | u8 cur_delay; |
| 965 | u8 min_delay; |
| 966 | u8 max_delay; |
| 967 | u8 fmax; |
| 968 | u8 fstart; |
| 969 | |
| 970 | u64 last_count1; |
| 971 | unsigned long last_time1; |
| 972 | unsigned long chipset_power; |
| 973 | u64 last_count2; |
Thomas Gleixner | 5ed0bdf | 2014-07-16 21:05:06 +0000 | [diff] [blame] | 974 | u64 last_time2; |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 975 | unsigned long gfx_power; |
| 976 | u8 corr; |
| 977 | |
| 978 | int c_m; |
| 979 | int r_t; |
| 980 | }; |
| 981 | |
Imre Deak | c6cb582 | 2014-03-04 19:22:55 +0200 | [diff] [blame] | 982 | struct drm_i915_private; |
| 983 | struct i915_power_well; |
| 984 | |
| 985 | struct i915_power_well_ops { |
| 986 | /* |
| 987 | * Synchronize the well's hw state to match the current sw state, for |
| 988 | * example enable/disable it based on the current refcount. Called |
| 989 | * during driver init and resume time, possibly after first calling |
| 990 | * the enable/disable handlers. |
| 991 | */ |
| 992 | void (*sync_hw)(struct drm_i915_private *dev_priv, |
| 993 | struct i915_power_well *power_well); |
| 994 | /* |
| 995 | * Enable the well and resources that depend on it (for example |
| 996 | * interrupts located on the well). Called after the 0->1 refcount |
| 997 | * transition. |
| 998 | */ |
| 999 | void (*enable)(struct drm_i915_private *dev_priv, |
| 1000 | struct i915_power_well *power_well); |
| 1001 | /* |
| 1002 | * Disable the well and resources that depend on it. Called after |
| 1003 | * the 1->0 refcount transition. |
| 1004 | */ |
| 1005 | void (*disable)(struct drm_i915_private *dev_priv, |
| 1006 | struct i915_power_well *power_well); |
| 1007 | /* Returns the hw enabled state. */ |
| 1008 | bool (*is_enabled)(struct drm_i915_private *dev_priv, |
| 1009 | struct i915_power_well *power_well); |
| 1010 | }; |
| 1011 | |
Wang Xingchao | a38911a | 2013-05-30 22:07:11 +0800 | [diff] [blame] | 1012 | /* Power well structure for haswell */ |
| 1013 | struct i915_power_well { |
Imre Deak | c1ca727 | 2013-11-25 17:15:29 +0200 | [diff] [blame] | 1014 | const char *name; |
Imre Deak | 6f3ef5d | 2013-11-25 17:15:30 +0200 | [diff] [blame] | 1015 | bool always_on; |
Wang Xingchao | a38911a | 2013-05-30 22:07:11 +0800 | [diff] [blame] | 1016 | /* power well enable/disable usage count */ |
| 1017 | int count; |
Imre Deak | bfafe93 | 2014-06-05 20:31:47 +0300 | [diff] [blame] | 1018 | /* cached hw enabled state */ |
| 1019 | bool hw_enabled; |
Ander Conselvan de Oliveira | d8fc70b | 2017-02-09 11:31:21 +0200 | [diff] [blame] | 1020 | u64 domains; |
Ander Conselvan de Oliveira | 01c3faa | 2016-10-06 19:22:14 +0300 | [diff] [blame] | 1021 | /* unique identifier for this power well */ |
Imre Deak | 438b8dc | 2017-07-11 23:42:30 +0300 | [diff] [blame] | 1022 | enum i915_power_well_id id; |
Ander Conselvan de Oliveira | 362624c | 2016-10-06 19:22:15 +0300 | [diff] [blame] | 1023 | /* |
| 1024 | * Arbitrary data associated with this power well. Platform and power |
| 1025 | * well specific. |
| 1026 | */ |
Imre Deak | b5565a2 | 2017-07-06 17:40:29 +0300 | [diff] [blame] | 1027 | union { |
| 1028 | struct { |
| 1029 | enum dpio_phy phy; |
| 1030 | } bxt; |
Imre Deak | 001bd2c | 2017-07-12 18:54:13 +0300 | [diff] [blame] | 1031 | struct { |
| 1032 | /* Mask of pipes whose IRQ logic is backed by the pw */ |
| 1033 | u8 irq_pipe_mask; |
| 1034 | /* The pw is backing the VGA functionality */ |
| 1035 | bool has_vga:1; |
Imre Deak | b2891eb | 2017-07-11 23:42:35 +0300 | [diff] [blame] | 1036 | bool has_fuses:1; |
Imre Deak | 001bd2c | 2017-07-12 18:54:13 +0300 | [diff] [blame] | 1037 | } hsw; |
Imre Deak | b5565a2 | 2017-07-06 17:40:29 +0300 | [diff] [blame] | 1038 | }; |
Imre Deak | c6cb582 | 2014-03-04 19:22:55 +0200 | [diff] [blame] | 1039 | const struct i915_power_well_ops *ops; |
Wang Xingchao | a38911a | 2013-05-30 22:07:11 +0800 | [diff] [blame] | 1040 | }; |
| 1041 | |
Imre Deak | 83c00f5 | 2013-10-25 17:36:47 +0300 | [diff] [blame] | 1042 | struct i915_power_domains { |
Imre Deak | baa7070 | 2013-10-25 17:36:48 +0300 | [diff] [blame] | 1043 | /* |
| 1044 | * Power wells needed for initialization at driver init and suspend |
| 1045 | * time are on. They are kept on until after the first modeset. |
| 1046 | */ |
| 1047 | bool init_power_on; |
Imre Deak | 0d116a2 | 2014-04-25 13:19:05 +0300 | [diff] [blame] | 1048 | bool initializing; |
Imre Deak | c1ca727 | 2013-11-25 17:15:29 +0200 | [diff] [blame] | 1049 | int power_well_count; |
Imre Deak | baa7070 | 2013-10-25 17:36:48 +0300 | [diff] [blame] | 1050 | |
Imre Deak | 83c00f5 | 2013-10-25 17:36:47 +0300 | [diff] [blame] | 1051 | struct mutex lock; |
Imre Deak | 1da5158 | 2013-11-25 17:15:35 +0200 | [diff] [blame] | 1052 | int domain_use_count[POWER_DOMAIN_NUM]; |
Imre Deak | c1ca727 | 2013-11-25 17:15:29 +0200 | [diff] [blame] | 1053 | struct i915_power_well *power_wells; |
Imre Deak | 83c00f5 | 2013-10-25 17:36:47 +0300 | [diff] [blame] | 1054 | }; |
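| | |
| | /* |
| |  * Sketch of the refcount discipline implied by i915_power_well_ops |
| |  * above: ->enable() fires only on the 0->1 count transition (the 1->0 |
| |  * mirror image applies to ->disable()). Loosely modelled on |
| |  * intel_display_power_get() in intel_runtime_pm.c; the domain-to-well |
| |  * iteration and locking via i915_power_domains->lock are elided. |
| |  */ |
| | static inline void example_power_well_get(struct drm_i915_private *dev_priv, |
| | 					  struct i915_power_well *power_well) |
| | { |
| | 	if (!power_well->count++) { |
| | 		power_well->ops->enable(dev_priv, power_well); |
| | 		power_well->hw_enabled = true; |
| | 	} |
| | } |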
| 1055 | |
Ben Widawsky | 35a85ac | 2013-09-19 11:13:41 -0700 | [diff] [blame] | 1056 | #define MAX_L3_SLICES 2 |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 1057 | struct intel_l3_parity { |
Ben Widawsky | 35a85ac | 2013-09-19 11:13:41 -0700 | [diff] [blame] | 1058 | u32 *remap_info[MAX_L3_SLICES]; |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 1059 | struct work_struct error_work; |
Ben Widawsky | 35a85ac | 2013-09-19 11:13:41 -0700 | [diff] [blame] | 1060 | int which_slice; |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 1061 | }; |
| 1062 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1063 | struct i915_gem_mm { |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1064 | /** Memory allocator for GTT stolen memory */ |
| 1065 | struct drm_mm stolen; |
Paulo Zanoni | 92e97d2 | 2015-07-02 19:25:09 -0300 | [diff] [blame] | 1066 | /** Protects the usage of the GTT stolen memory allocator. This is |
| 1067 | * always the inner lock when overlapping with struct_mutex. */ |
| 1068 | struct mutex stolen_lock; |
| 1069 | |
Chris Wilson | f212381 | 2017-10-16 12:40:37 +0100 | [diff] [blame] | 1070 | /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ |
| 1071 | spinlock_t obj_lock; |
| 1072 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1073 | /** List of all objects in gtt_space. Used to restore gtt |
| 1074 | * mappings on resume */ |
| 1075 | struct list_head bound_list; |
| 1076 | /** |
| 1077 | * List of objects which are not bound to the GTT (thus |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1078 | * are idle and not used by the GPU). These objects may or may |
| 1079 | * not actually have any pages attached. |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1080 | */ |
| 1081 | struct list_head unbound_list; |
| 1082 | |
Chris Wilson | 275f039 | 2016-10-24 13:42:14 +0100 | [diff] [blame] | 1083 | /** List of all objects in gtt_space, currently mmaped by userspace. |
| 1084 | * All objects within this list must also be on bound_list. |
| 1085 | */ |
| 1086 | struct list_head userfault_list; |
| 1087 | |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1088 | /** |
| 1089 | * List of objects which are pending destruction. |
| 1090 | */ |
| 1091 | struct llist_head free_list; |
| 1092 | struct work_struct free_work; |
Chris Wilson | 87701b4 | 2017-10-13 21:26:20 +0100 | [diff] [blame] | 1093 | spinlock_t free_lock; |
Chris Wilson | fbbd37b | 2016-10-28 13:58:42 +0100 | [diff] [blame] | 1094 | |
Chris Wilson | 66df101 | 2017-08-22 18:38:28 +0100 | [diff] [blame] | 1095 | /** |
| 1096 | * Small stash of WC pages |
| 1097 | */ |
| 1098 | struct pagevec wc_stash; |
| 1099 | |
Matthew Auld | 465c403 | 2017-10-06 23:18:14 +0100 | [diff] [blame] | 1100 | /** |
| 1101 | * tmpfs instance used for shmem backed objects |
| 1102 | */ |
| 1103 | struct vfsmount *gemfs; |
| 1104 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1105 | /** PPGTT used for aliasing the PPGTT with the GTT */ |
| 1106 | struct i915_hw_ppgtt *aliasing_ppgtt; |
| 1107 | |
Chris Wilson | 2cfcd32a | 2014-05-20 08:28:43 +0100 | [diff] [blame] | 1108 | struct notifier_block oom_notifier; |
Chris Wilson | e87666b | 2016-04-04 14:46:43 +0100 | [diff] [blame] | 1109 | struct notifier_block vmap_notifier; |
Chris Wilson | ceabbba5 | 2014-03-25 13:23:04 +0000 | [diff] [blame] | 1110 | struct shrinker shrinker; |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1111 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1112 | /** LRU list of objects with fence regs on them. */ |
| 1113 | struct list_head fence_list; |
| 1114 | |
Chris Wilson | 8a2421b | 2017-06-16 15:05:22 +0100 | [diff] [blame] | 1115 | /** |
| 1116 | * Workqueue to fault in userptr pages, flushed by the execbuf |
| 1117 | * when required but otherwise left to userspace to try again |
| 1118 | * on EAGAIN. |
| 1119 | */ |
| 1120 | struct workqueue_struct *userptr_wq; |
| 1121 | |
Chris Wilson | 9431282 | 2017-05-03 10:39:18 +0100 | [diff] [blame] | 1122 | u64 unordered_timeline; |
| 1123 | |
Daniel Vetter | bdf1e7e | 2014-05-21 17:37:52 +0200 | [diff] [blame] | 1124 | /* the indicator for dispatching video commands on two BSD rings */ |
Joonas Lahtinen | 6f63340 | 2016-09-01 14:58:21 +0300 | [diff] [blame] | 1125 | atomic_t bsd_engine_dispatch_index; |
Daniel Vetter | bdf1e7e | 2014-05-21 17:37:52 +0200 | [diff] [blame] | 1126 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1127 | /** Bit 6 swizzling required for X tiling */ |
| 1128 | uint32_t bit_6_swizzle_x; |
| 1129 | /** Bit 6 swizzling required for Y tiling */ |
| 1130 | uint32_t bit_6_swizzle_y; |
| 1131 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1132 | /* accounting, useful for userland debugging */ |
Daniel Vetter | c20e835 | 2013-07-24 22:40:23 +0200 | [diff] [blame] | 1133 | spinlock_t object_stat_lock; |
Chris Wilson | 3ef7f22 | 2016-10-18 13:02:48 +0100 | [diff] [blame] | 1134 | u64 object_memory; |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1135 | u32 object_count; |
| 1136 | }; |
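| | |
| | /* |
| |  * Sketch of the deferred-free pattern behind free_list/free_work above: |
| |  * the final unreference pushes the object onto the lockless llist and |
| |  * kicks the worker, keeping heavyweight teardown out of the release |
| |  * path. Mirrors the llist_add()/schedule_work() pairing used by |
| |  * i915_gem_free_object(); the obj->freed llist_node is assumed from |
| |  * the gem object definition. |
| |  */ |
| | static inline void example_defer_free(struct drm_i915_private *i915, |
| | 				      struct drm_i915_gem_object *obj) |
| | { |
| | 	/* llist_add() returns true when the list was previously empty */ |
| | 	if (llist_add(&obj->freed, &i915->mm.free_list)) |
| | 		schedule_work(&i915->mm.free_work); |
| | } |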
| 1137 | |
Mika Kuoppala | edc3d88 | 2013-05-23 13:55:35 +0300 | [diff] [blame] | 1138 | struct drm_i915_error_state_buf { |
Chris Wilson | 0a4cd7c | 2014-08-22 14:41:39 +0100 | [diff] [blame] | 1139 | struct drm_i915_private *i915; |
Mika Kuoppala | edc3d88 | 2013-05-23 13:55:35 +0300 | [diff] [blame] | 1140 | unsigned bytes; |
| 1141 | unsigned size; |
| 1142 | int err; |
| 1143 | u8 *buf; |
| 1144 | loff_t start; |
| 1145 | loff_t pos; |
| 1146 | }; |
| 1147 | |
Chris Wilson | ee42c00 | 2017-12-11 19:41:34 +0000 | [diff] [blame] | 1148 | #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ |
| 1149 | |
Chris Wilson | b52992c | 2016-10-28 13:58:24 +0100 | [diff] [blame] | 1150 | #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ |
| 1151 | #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ |
| 1152 | |
Mika Kuoppala | 3fe3b03 | 2016-11-18 15:09:04 +0200 | [diff] [blame] | 1153 | #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ |
| 1154 | #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ |
| 1155 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1156 | struct i915_gpu_error { |
| 1157 | /* For hangcheck timer */ |
| 1158 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
| 1159 | #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
Mika Kuoppala | be62acb | 2013-08-30 16:19:28 +0300 | [diff] [blame] | 1160 | |
Chris Wilson | 737b150 | 2015-01-26 18:03:03 +0200 | [diff] [blame] | 1161 | struct delayed_work hangcheck_work; |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1162 | |
| 1163 | /* For reset and error_state handling. */ |
| 1164 | spinlock_t lock; |
| 1165 | /* Protected by the above dev->gpu_error.lock. */ |
Chris Wilson | 5a4c6f1 | 2017-02-14 16:46:11 +0000 | [diff] [blame] | 1166 | struct i915_gpu_state *first_error; |
Chris Wilson | 094f9a5 | 2013-09-25 17:34:55 +0100 | [diff] [blame] | 1167 | |
Daniel Vetter | 9db529a | 2017-08-08 10:08:28 +0200 | [diff] [blame] | 1168 | atomic_t pending_fb_pin; |
| 1169 | |
Chris Wilson | 094f9a5 | 2013-09-25 17:34:55 +0100 | [diff] [blame] | 1170 | unsigned long missed_irq_rings; |
| 1171 | |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1172 | /** |
Mika Kuoppala | 2ac0f45 | 2013-11-12 14:44:19 +0200 | [diff] [blame] | 1173 | * State variable controlling the reset flow and count |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1174 | * |
Mika Kuoppala | 2ac0f45 | 2013-11-12 14:44:19 +0200 | [diff] [blame] | 1175 | * This is a counter which gets incremented when reset is triggered, |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 1176 | * |
Michel Thierry | 56306c6 | 2017-04-18 13:23:16 -0700 | [diff] [blame] | 1177 | * Before the reset commences, the I915_RESET_BACKOFF bit is set |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 1178 | * meaning that any waiters holding onto the struct_mutex should |
| 1179 | * relinquish the lock immediately in order for the reset to start. |
Mika Kuoppala | 2ac0f45 | 2013-11-12 14:44:19 +0200 | [diff] [blame] | 1180 | * |
| 1181 | * If reset is not completed successfully, the I915_WEDGED bit is |
| 1182 | * set meaning that hardware is terminally sour and there is no |
| 1183 | * recovery. All waiters on the reset_queue will be woken when |
| 1184 | * that happens. |
| 1185 | * |
| 1186 | * This counter is used by the wait_seqno code to notice that a reset |
| 1187 | * event happened and that it needs to restart the entire ioctl (since most |
| 1188 | * likely the seqno it waited for won't ever signal anytime soon). |
Daniel Vetter | f69061b | 2012-12-06 09:01:42 +0100 | [diff] [blame] | 1189 | * |
| 1190 | * This is important for lock-free wait paths, where no contended lock |
| 1191 | * naturally enforces the correct ordering between the bail-out of the |
| 1192 | * waiter and the gpu reset work code. |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1193 | */ |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 1194 | unsigned long reset_count; |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1195 | |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 1196 | /** |
| 1197 | * flags: Control various stages of the GPU reset |
| 1198 | * |
| 1199 | * #I915_RESET_BACKOFF - When we start a reset, we want to stop any |
| 1200 | * other users acquiring the struct_mutex. To do this we set the |
| 1201 | * #I915_RESET_BACKOFF bit in the error flags when we detect a reset |
| 1202 | * and then check for that bit before acquiring the struct_mutex (in |
| 1203 | * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a |
| 1204 | * secondary role in preventing two concurrent global reset attempts. |
| 1205 | * |
| 1206 | * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the |
| 1207 | * struct_mutex. We try to acquire the struct_mutex in the reset worker, |
| 1208 | * but it may be held by some long running waiter (that we cannot |
| 1209 | * interrupt without causing trouble). Once we are ready to do the GPU |
| 1210 | * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If |
| 1211 | * they already hold the struct_mutex and want to participate they can |
| 1212 | * inspect the bit and do the reset directly, otherwise the worker |
| 1213 | * waits for the struct_mutex. |
| 1214 | * |
Michel Thierry | 142bc7d | 2017-06-20 10:57:46 +0100 | [diff] [blame] | 1215 | * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to |
| 1216 | * acquire the struct_mutex to reset an engine, we need an explicit |
| 1217 | * flag to prevent two concurrent reset attempts in the same engine. |
| 1218 | * As the number of engines continues to grow, allocate the flags from |
| 1219 | * the most significant bits. |
| 1220 | * |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 1221 | * #I915_WEDGED - If reset fails and we can no longer use the GPU, |
| 1222 | * we set the #I915_WEDGED bit. Prior to command submission, e.g. |
| 1223 | * i915_gem_request_alloc(), this bit is checked and the sequence |
| 1224 | * aborted (with -EIO reported to userspace) if set. |
| 1225 | */ |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 1226 | unsigned long flags; |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 1227 | #define I915_RESET_BACKOFF 0 |
| 1228 | #define I915_RESET_HANDOFF 1 |
Daniel Vetter | 9db529a | 2017-08-08 10:08:28 +0200 | [diff] [blame] | 1229 | #define I915_RESET_MODESET 2 |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 1230 | #define I915_WEDGED (BITS_PER_LONG - 1) |
Michel Thierry | 142bc7d | 2017-06-20 10:57:46 +0100 | [diff] [blame] | 1231 | #define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1232 | |
Michel Thierry | 702c8f8 | 2017-06-20 10:57:48 +0100 | [diff] [blame] | 1233 | /** Number of times an engine has been reset */ |
| 1234 | u32 reset_engine_count[I915_NUM_ENGINES]; |
| 1235 | |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1236 | /** |
Chris Wilson | 1f15b76 | 2016-07-01 17:23:14 +0100 | [diff] [blame] | 1237 | * Waitqueue to signal when a hang is detected. Used for waiters |
| 1238 | * to release the struct_mutex for the reset to proceed. |
| 1239 | */ |
| 1240 | wait_queue_head_t wait_queue; |
| 1241 | |
| 1242 | /** |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 1243 | * Waitqueue to signal when the reset has completed. Used by clients |
| 1244 | * that wait for dev_priv->mm.wedged to settle. |
| 1245 | */ |
| 1246 | wait_queue_head_t reset_queue; |
Daniel Vetter | 33196de | 2012-11-14 17:14:05 +0100 | [diff] [blame] | 1247 | |
Chris Wilson | 094f9a5 | 2013-09-25 17:34:55 +0100 | [diff] [blame] | 1248 | /* For missed irq/seqno simulation. */ |
Chris Wilson | 688e6c7 | 2016-07-01 17:23:15 +0100 | [diff] [blame] | 1249 | unsigned long test_irq_rings; |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 1250 | }; |
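| | |
| | /* |
| |  * Sketch of how the I915_RESET_ENGINE flag block above is consumed: |
| |  * one bit per engine, allocated downwards from I915_WEDGED, so resets |
| |  * of different engines can proceed concurrently while a second reset |
| |  * of the same engine is refused. Follows the test_and_set_bit() |
| |  * pattern used around i915_reset_engine(); error handling is omitted. |
| |  */ |
| | static inline bool example_claim_engine_reset(struct i915_gpu_error *error, |
| | 					      unsigned int engine_id) |
| | { |
| | 	/* false means a reset of this engine is already in flight */ |
| | 	return !test_and_set_bit(I915_RESET_ENGINE + engine_id, |
| | 				 &error->flags); |
| | } |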
| 1251 | |
Zhang Rui | b8efb17 | 2013-02-05 15:41:53 +0800 | [diff] [blame] | 1252 | enum modeset_restore { |
| 1253 | MODESET_ON_LID_OPEN, |
| 1254 | MODESET_DONE, |
| 1255 | MODESET_SUSPENDED, |
| 1256 | }; |
| 1257 | |
Rodrigo Vivi | 500ea70 | 2015-08-07 17:01:16 -0700 | [diff] [blame] | 1258 | #define DP_AUX_A 0x40 |
| 1259 | #define DP_AUX_B 0x10 |
| 1260 | #define DP_AUX_C 0x20 |
| 1261 | #define DP_AUX_D 0x30 |
Rodrigo Vivi | a324fca | 2018-01-29 15:22:15 -0800 | [diff] [blame] | 1262 | #define DP_AUX_F 0x60 |
Rodrigo Vivi | 500ea70 | 2015-08-07 17:01:16 -0700 | [diff] [blame] | 1263 | |
Xiong Zhang | 11c1b65 | 2015-08-17 16:04:04 +0800 | [diff] [blame] | 1264 | #define DDC_PIN_B 0x05 |
| 1265 | #define DDC_PIN_C 0x04 |
| 1266 | #define DDC_PIN_D 0x06 |
| 1267 | |
Paulo Zanoni | 6acab15 | 2013-09-12 17:06:24 -0300 | [diff] [blame] | 1268 | struct ddi_vbt_port_info { |
Ville Syrjälä | d603861 | 2017-10-30 16:57:02 +0200 | [diff] [blame] | 1269 | int max_tmds_clock; |
| 1270 | |
Damien Lespiau | ce4dd49 | 2014-08-01 11:07:54 +0100 | [diff] [blame] | 1271 | /* |
| 1272 | * This is an index in the HDMI/DVI DDI buffer translation table. |
| 1273 | * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't |
| 1274 | * populate this field. |
| 1275 | */ |
| 1276 | #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff |
Paulo Zanoni | 6acab15 | 2013-09-12 17:06:24 -0300 | [diff] [blame] | 1277 | uint8_t hdmi_level_shift; |
Paulo Zanoni | 311a209 | 2013-09-12 17:12:18 -0300 | [diff] [blame] | 1278 | |
| 1279 | uint8_t supports_dvi:1; |
| 1280 | uint8_t supports_hdmi:1; |
| 1281 | uint8_t supports_dp:1; |
Imre Deak | a98d9c1 | 2016-12-21 12:17:24 +0200 | [diff] [blame] | 1282 | uint8_t supports_edp:1; |
Rodrigo Vivi | 500ea70 | 2015-08-07 17:01:16 -0700 | [diff] [blame] | 1283 | |
| 1284 | uint8_t alternate_aux_channel; |
Xiong Zhang | 11c1b65 | 2015-08-17 16:04:04 +0800 | [diff] [blame] | 1285 | uint8_t alternate_ddc_pin; |
Antti Koskipaa | 75067dd | 2015-07-10 14:10:55 +0300 | [diff] [blame] | 1286 | |
| 1287 | uint8_t dp_boost_level; |
| 1288 | uint8_t hdmi_boost_level; |
Jani Nikula | 99b91bd | 2018-02-01 13:03:43 +0200 | [diff] [blame] | 1289 | int dp_max_link_rate; /* 0 for not limited by VBT */ |
Paulo Zanoni | 6acab15 | 2013-09-12 17:06:24 -0300 | [diff] [blame] | 1290 | }; |
| 1291 | |
Rodrigo Vivi | bfd7ebd | 2014-11-14 08:52:30 -0800 | [diff] [blame] | 1292 | enum psr_lines_to_wait { |
| 1293 | PSR_0_LINES_TO_WAIT = 0, |
| 1294 | PSR_1_LINE_TO_WAIT, |
| 1295 | PSR_4_LINES_TO_WAIT, |
| 1296 | PSR_8_LINES_TO_WAIT |
Pradeep Bhat | 83a7280 | 2014-03-28 10:14:57 +0530 | [diff] [blame] | 1297 | }; |
| 1298 | |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1299 | struct intel_vbt_data { |
| 1300 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
| 1301 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
| 1302 | |
| 1303 | /* Feature bits */ |
| 1304 | unsigned int int_tv_support:1; |
| 1305 | unsigned int lvds_dither:1; |
| 1306 | unsigned int lvds_vbt:1; |
| 1307 | unsigned int int_crt_support:1; |
| 1308 | unsigned int lvds_use_ssc:1; |
| 1309 | unsigned int display_clock_mode:1; |
| 1310 | unsigned int fdi_rx_polarity_inverted:1; |
Ville Syrjälä | 3e845c7 | 2016-04-08 16:28:12 +0300 | [diff] [blame] | 1311 | unsigned int panel_type:4; |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1312 | int lvds_ssc_freq; |
| 1313 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
| 1314 | |
Pradeep Bhat | 83a7280 | 2014-03-28 10:14:57 +0530 | [diff] [blame] | 1315 | enum drrs_support_type drrs_type; |
| 1316 | |
Jani Nikula | 6aa23e6 | 2016-03-24 17:50:20 +0200 | [diff] [blame] | 1317 | struct { |
| 1318 | int rate; |
| 1319 | int lanes; |
| 1320 | int preemphasis; |
| 1321 | int vswing; |
Jani Nikula | 06411f0 | 2016-03-24 17:50:21 +0200 | [diff] [blame] | 1322 | bool low_vswing; |
Jani Nikula | 6aa23e6 | 2016-03-24 17:50:20 +0200 | [diff] [blame] | 1323 | bool initialized; |
| 1324 | bool support; |
| 1325 | int bpp; |
| 1326 | struct edp_power_seq pps; |
| 1327 | } edp; |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1328 | |
Jani Nikula | f00076d | 2013-12-14 20:38:29 -0200 | [diff] [blame] | 1329 | struct { |
Rodrigo Vivi | bfd7ebd | 2014-11-14 08:52:30 -0800 | [diff] [blame] | 1330 | bool full_link; |
| 1331 | bool require_aux_wakeup; |
| 1332 | int idle_frames; |
| 1333 | enum psr_lines_to_wait lines_to_wait; |
| 1334 | int tp1_wakeup_time; |
| 1335 | int tp2_tp3_wakeup_time; |
| 1336 | } psr; |
| 1337 | |
| 1338 | struct { |
Jani Nikula | f00076d | 2013-12-14 20:38:29 -0200 | [diff] [blame] | 1339 | u16 pwm_freq_hz; |
Jani Nikula | 39fbc9c | 2014-04-09 11:22:06 +0300 | [diff] [blame] | 1340 | bool present; |
Jani Nikula | f00076d | 2013-12-14 20:38:29 -0200 | [diff] [blame] | 1341 | bool active_low_pwm; |
Jani Nikula | 1de6068 | 2014-06-24 18:27:39 +0300 | [diff] [blame] | 1342 | u8 min_brightness; /* min_brightness/255 of max */ |
Vidya Srinivas | add0337 | 2016-12-08 11:26:18 +0200 | [diff] [blame] | 1343 | u8 controller; /* brightness controller number */ |
Deepak M | 9a41e17 | 2016-04-26 16:14:24 +0300 | [diff] [blame] | 1344 | enum intel_backlight_type type; |
Jani Nikula | f00076d | 2013-12-14 20:38:29 -0200 | [diff] [blame] | 1345 | } backlight; |
| 1346 | |
Shobhit Kumar | d17c544 | 2013-08-27 15:12:25 +0300 | [diff] [blame] | 1347 | /* MIPI DSI */ |
| 1348 | struct { |
| 1349 | u16 panel_id; |
Shobhit Kumar | d3b542f | 2014-04-14 11:00:34 +0530 | [diff] [blame] | 1350 | struct mipi_config *config; |
| 1351 | struct mipi_pps_data *pps; |
Madhav Chauhan | 46e5832 | 2017-10-13 18:14:59 +0530 | [diff] [blame] | 1352 | u16 bl_ports; |
| 1353 | u16 cabc_ports; |
Shobhit Kumar | d3b542f | 2014-04-14 11:00:34 +0530 | [diff] [blame] | 1354 | u8 seq_version; |
| 1355 | u32 size; |
| 1356 | u8 *data; |
Jani Nikula | 8d3ed2f | 2015-12-21 15:10:57 +0200 | [diff] [blame] | 1357 | const u8 *sequence[MIPI_SEQ_MAX]; |
Shobhit Kumar | d17c544 | 2013-08-27 15:12:25 +0300 | [diff] [blame] | 1358 | } dsi; |
| 1359 | |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1360 | int crt_ddc_pin; |
| 1361 | |
| 1362 | int child_dev_num; |
Jani Nikula | cc99858 | 2017-08-24 21:54:03 +0300 | [diff] [blame] | 1363 | struct child_device_config *child_dev; |
Paulo Zanoni | 6acab15 | 2013-09-12 17:06:24 -0300 | [diff] [blame] | 1364 | |
| 1365 | struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; |
Jani Nikula | 9d6c875 | 2016-03-24 17:50:22 +0200 | [diff] [blame] | 1366 | struct sdvo_device_mapping sdvo_mappings[2]; |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1367 | }; |
| 1368 | |
Ville Syrjälä | 77c122b | 2013-08-06 22:24:04 +0300 | [diff] [blame] | 1369 | enum intel_ddb_partitioning { |
| 1370 | INTEL_DDB_PART_1_2, |
| 1371 | INTEL_DDB_PART_5_6, /* IVB+ */ |
| 1372 | }; |
| 1373 | |
Ville Syrjälä | 1fd527c | 2013-08-06 22:24:05 +0300 | [diff] [blame] | 1374 | struct intel_wm_level { |
| 1375 | bool enable; |
| 1376 | uint32_t pri_val; |
| 1377 | uint32_t spr_val; |
| 1378 | uint32_t cur_val; |
| 1379 | uint32_t fbc_val; |
| 1380 | }; |
| 1381 | |
Imre Deak | 820c198 | 2013-12-17 14:46:36 +0200 | [diff] [blame] | 1382 | struct ilk_wm_values { |
Ville Syrjälä | 609cede | 2013-10-09 19:18:03 +0300 | [diff] [blame] | 1383 | uint32_t wm_pipe[3]; |
| 1384 | uint32_t wm_lp[3]; |
| 1385 | uint32_t wm_lp_spr[3]; |
| 1386 | uint32_t wm_linetime[3]; |
| 1387 | bool enable_fbc_wm; |
| 1388 | enum intel_ddb_partitioning partitioning; |
| 1389 | }; |
| 1390 | |
Ville Syrjälä | 114d7dc | 2017-04-21 21:14:21 +0300 | [diff] [blame] | 1391 | struct g4x_pipe_wm { |
Ville Syrjälä | 1b31389 | 2016-11-28 19:37:08 +0200 | [diff] [blame] | 1392 | uint16_t plane[I915_MAX_PLANES]; |
Ville Syrjälä | 04548cb | 2017-04-21 21:14:29 +0300 | [diff] [blame] | 1393 | uint16_t fbc; |
Ville Syrjälä | 262cd2e | 2015-06-24 22:00:04 +0300 | [diff] [blame] | 1394 | }; |
| 1395 | |
Ville Syrjälä | 114d7dc | 2017-04-21 21:14:21 +0300 | [diff] [blame] | 1396 | struct g4x_sr_wm { |
Ville Syrjälä | 262cd2e | 2015-06-24 22:00:04 +0300 | [diff] [blame] | 1397 | uint16_t plane; |
Ville Syrjälä | 1b31389 | 2016-11-28 19:37:08 +0200 | [diff] [blame] | 1398 | uint16_t cursor; |
Ville Syrjälä | 04548cb | 2017-04-21 21:14:29 +0300 | [diff] [blame] | 1399 | uint16_t fbc; |
Ville Syrjälä | 1b31389 | 2016-11-28 19:37:08 +0200 | [diff] [blame] | 1400 | }; |
| 1401 | |
| 1402 | struct vlv_wm_ddl_values { |
| 1403 | uint8_t plane[I915_MAX_PLANES]; |
Ville Syrjälä | 262cd2e | 2015-06-24 22:00:04 +0300 | [diff] [blame] | 1404 | }; |
| 1405 | |
Ville Syrjälä | 0018fda | 2015-03-05 21:19:45 +0200 | [diff] [blame] | 1406 | struct vlv_wm_values { |
Ville Syrjälä | 114d7dc | 2017-04-21 21:14:21 +0300 | [diff] [blame] | 1407 | struct g4x_pipe_wm pipe[3]; |
| 1408 | struct g4x_sr_wm sr; |
Ville Syrjälä | 1b31389 | 2016-11-28 19:37:08 +0200 | [diff] [blame] | 1409 | struct vlv_wm_ddl_values ddl[3]; |
Ville Syrjälä | 6eb1a68 | 2015-06-24 22:00:03 +0300 | [diff] [blame] | 1410 | uint8_t level; |
| 1411 | bool cxsr; |
Ville Syrjälä | 0018fda | 2015-03-05 21:19:45 +0200 | [diff] [blame] | 1412 | }; |
| 1413 | |
Ville Syrjälä | 04548cb | 2017-04-21 21:14:29 +0300 | [diff] [blame] | 1414 | struct g4x_wm_values { |
| 1415 | struct g4x_pipe_wm pipe[2]; |
| 1416 | struct g4x_sr_wm sr; |
| 1417 | struct g4x_sr_wm hpll; |
| 1418 | bool cxsr; |
| 1419 | bool hpll_en; |
| 1420 | bool fbc_en; |
| 1421 | }; |
| 1422 | |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1423 | struct skl_ddb_entry { |
Damien Lespiau | 16160e3 | 2014-11-04 17:06:53 +0000 | [diff] [blame] | 1424 | uint16_t start, end; /* in number of blocks, 'end' is exclusive */ |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1425 | }; |
| 1426 | |
| 1427 | static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) |
| 1428 | { |
Damien Lespiau | 16160e3 | 2014-11-04 17:06:53 +0000 | [diff] [blame] | 1429 | return entry->end - entry->start; |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1430 | } |
| 1431 | |
Damien Lespiau | 08db665 | 2014-11-04 17:06:52 +0000 | [diff] [blame] | 1432 | static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, |
| 1433 | const struct skl_ddb_entry *e2) |
| 1434 | { |
| 1435 | if (e1->start == e2->start && e1->end == e2->end) |
| 1436 | return true; |
| 1437 | |
| 1438 | return false; |
| 1439 | } |
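| | |
| | /* |
| |  * A natural companion to the equality check above: two DDB entries |
| |  * collide when their [start, end) block ranges intersect, using the |
| |  * exclusive-'end' convention documented on skl_ddb_entry. The driver |
| |  * carries an equivalent skl_ddb_entries_overlap() helper in |
| |  * intel_pm.c; this is just the textbook interval test. |
| |  */ |
| | static inline bool example_skl_ddb_entries_overlap(const struct skl_ddb_entry *a, |
| | 						   const struct skl_ddb_entry *b) |
| | { |
| | 	return a->start < b->end && b->start < a->end; |
| | } |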
| 1440 | |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1441 | struct skl_ddb_allocation { |
Chandra Konduru | 2cd601c | 2015-04-27 15:47:37 -0700 | [diff] [blame] | 1442 | struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ |
Matt Roper | 4969d33 | 2015-09-24 15:53:10 -0700 | [diff] [blame] | 1443 | struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1444 | }; |
| 1445 | |
Pradeep Bhat | 2ac96d2 | 2014-11-04 17:06:40 +0000 | [diff] [blame] | 1446 | struct skl_wm_values { |
Matt Roper | 2b4b9f3 | 2016-05-12 07:06:07 -0700 | [diff] [blame] | 1447 | unsigned dirty_pipes; |
Damien Lespiau | c193924 | 2014-11-04 17:06:41 +0000 | [diff] [blame] | 1448 | struct skl_ddb_allocation ddb; |
Pradeep Bhat | 2ac96d2 | 2014-11-04 17:06:40 +0000 | [diff] [blame] | 1449 | }; |
| 1450 | |
| 1451 | struct skl_wm_level { |
Lyude | a62163e | 2016-10-04 14:28:20 -0400 | [diff] [blame] | 1452 | bool plane_en; |
| 1453 | uint16_t plane_res_b; |
| 1454 | uint8_t plane_res_l; |
Pradeep Bhat | 2ac96d2 | 2014-11-04 17:06:40 +0000 | [diff] [blame] | 1455 | }; |
| 1456 | |
Kumar, Mahesh | 7e452fd | 2017-08-17 19:15:23 +0530 | [diff] [blame] | 1457 | /* Stores plane specific WM parameters */ |
| 1458 | struct skl_wm_params { |
| 1459 | bool x_tiled, y_tiled; |
| 1460 | bool rc_surface; |
| 1461 | uint32_t width; |
| 1462 | uint8_t cpp; |
| 1463 | uint32_t plane_pixel_rate; |
| 1464 | uint32_t y_min_scanlines; |
| 1465 | uint32_t plane_bytes_per_line; |
| 1466 | uint_fixed_16_16_t plane_blocks_per_line; |
| 1467 | uint_fixed_16_16_t y_tile_minimum; |
| 1468 | uint32_t linetime_us; |
Mahesh Kumar | df8ee19 | 2018-01-30 11:49:11 -0200 | [diff] [blame] | 1469 | uint32_t dbuf_block_size; |
Kumar, Mahesh | 7e452fd | 2017-08-17 19:15:23 +0530 | [diff] [blame] | 1470 | }; |
| 1471 | |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1472 | /* |
Paulo Zanoni | 765dab67 | 2014-03-07 20:08:18 -0300 | [diff] [blame] | 1473 | * This struct helps tracking the state needed for runtime PM, which puts the |
| 1474 | * device in PCI D3 state. Notice that when this happens, nothing on the |
| 1475 | * graphics device works, even register access, so we don't get interrupts or |
| 1476 | * anything else. |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1477 | * |
Paulo Zanoni | 765dab67 | 2014-03-07 20:08:18 -0300 | [diff] [blame] | 1478 | * Every piece of our code that needs to actually touch the hardware needs to |
| 1479 | * either call intel_runtime_pm_get or call intel_display_power_get with the |
| 1480 | * appropriate power domain. |
Paulo Zanoni | a8a8bd5 | 2014-03-07 20:08:05 -0300 | [diff] [blame] | 1481 | * |
Paulo Zanoni | 765dab67 | 2014-03-07 20:08:18 -0300 | [diff] [blame] | 1482 | * Our driver uses the autosuspend delay feature, which means we'll only really |
| 1483 | * suspend if we stay with zero refcount for a certain amount of time. The |
Daniel Vetter | f458ebb | 2014-09-30 10:56:39 +0200 | [diff] [blame] | 1484 | * default value is currently very conservative (see intel_runtime_pm_enable), but |
Paulo Zanoni | 765dab67 | 2014-03-07 20:08:18 -0300 | [diff] [blame] | 1485 | * it can be changed with the standard runtime PM files from sysfs. |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1486 | * |
| 1487 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and |
| 1488 | * goes back to false exactly before we reenable the IRQs. We use this variable |
| 1489 | * to check if someone is trying to enable/disable IRQs while they're supposed |
| 1490 | * to be disabled. This shouldn't happen and we'll print some error messages in |
Paulo Zanoni | 730488b | 2014-03-07 20:12:32 -0300 | [diff] [blame] | 1491 | * case it happens. |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1492 | * |
Paulo Zanoni | 765dab67 | 2014-03-07 20:08:18 -0300 | [diff] [blame] | 1493 | * For more, read Documentation/power/runtime_pm.txt. |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1494 | */ |
Paulo Zanoni | 5d584b2 | 2014-03-07 20:08:15 -0300 | [diff] [blame] | 1495 | struct i915_runtime_pm { |
Imre Deak | 1f814da | 2015-12-16 02:52:19 +0200 | [diff] [blame] | 1496 | atomic_t wakeref_count; |
Paulo Zanoni | 5d584b2 | 2014-03-07 20:08:15 -0300 | [diff] [blame] | 1497 | bool suspended; |
Daniel Vetter | 2aeb7d3 | 2014-09-30 10:56:43 +0200 | [diff] [blame] | 1498 | bool irqs_enabled; |
Paulo Zanoni | c67a470 | 2013-08-19 13:18:09 -0300 | [diff] [blame] | 1499 | }; |
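| | |
| | /* |
| |  * Minimal usage sketch for the wakeref accounting above: any code that |
| |  * touches the hardware brackets the access with a get/put pair so that |
| |  * wakeref_count stays non-zero (and the device in D0) for the |
| |  * duration. intel_runtime_pm_get()/intel_runtime_pm_put() are the real |
| |  * entry points; the body here is a placeholder. |
| |  */ |
| | static inline void example_with_runtime_pm(struct drm_i915_private *dev_priv) |
| | { |
| | 	intel_runtime_pm_get(dev_priv); /* wakeref_count++, device awake */ |
| | |
| | 	/* ... register access is safe in this window ... */ |
| | |
| | 	intel_runtime_pm_put(dev_priv); /* may re-arm the autosuspend timer */ |
| | } |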
| 1500 | |
Daniel Vetter | 926321d | 2013-10-16 13:30:34 +0200 | [diff] [blame] | 1501 | enum intel_pipe_crc_source { |
| 1502 | INTEL_PIPE_CRC_SOURCE_NONE, |
| 1503 | INTEL_PIPE_CRC_SOURCE_PLANE1, |
| 1504 | INTEL_PIPE_CRC_SOURCE_PLANE2, |
| 1505 | INTEL_PIPE_CRC_SOURCE_PF, |
Daniel Vetter | 5b3a856 | 2013-10-16 22:55:48 +0200 | [diff] [blame] | 1506 | INTEL_PIPE_CRC_SOURCE_PIPE, |
Daniel Vetter | 3d099a0 | 2013-10-16 22:55:58 +0200 | [diff] [blame] | 1507 | /* TV/DP on pre-gen5/vlv can't use the pipe source. */ |
| 1508 | INTEL_PIPE_CRC_SOURCE_TV, |
| 1509 | INTEL_PIPE_CRC_SOURCE_DP_B, |
| 1510 | INTEL_PIPE_CRC_SOURCE_DP_C, |
| 1511 | INTEL_PIPE_CRC_SOURCE_DP_D, |
Daniel Vetter | 46a1918 | 2013-11-01 10:50:20 +0100 | [diff] [blame] | 1512 | INTEL_PIPE_CRC_SOURCE_AUTO, |
Daniel Vetter | 926321d | 2013-10-16 13:30:34 +0200 | [diff] [blame] | 1513 | INTEL_PIPE_CRC_SOURCE_MAX, |
| 1514 | }; |
| 1515 | |
Shuang He | 8bf1e9f | 2013-10-15 18:55:27 +0100 | [diff] [blame] | 1516 | struct intel_pipe_crc_entry { |
Damien Lespiau | ac2300d | 2013-10-15 18:55:30 +0100 | [diff] [blame] | 1517 | uint32_t frame; |
Shuang He | 8bf1e9f | 2013-10-15 18:55:27 +0100 | [diff] [blame] | 1518 | uint32_t crc[5]; |
| 1519 | }; |
| 1520 | |
Damien Lespiau | b2c88f5 | 2013-10-15 18:55:29 +0100 | [diff] [blame] | 1521 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 |
Shuang He | 8bf1e9f | 2013-10-15 18:55:27 +0100 | [diff] [blame] | 1522 | struct intel_pipe_crc { |
Damien Lespiau | d538bbd | 2013-10-21 14:29:30 +0100 | [diff] [blame] | 1523 | spinlock_t lock; |
| 1524 | bool opened; /* exclusive access to the result file */ |
Damien Lespiau | e5f75ac | 2013-10-15 18:55:34 +0100 | [diff] [blame] | 1525 | struct intel_pipe_crc_entry *entries; |
Daniel Vetter | 926321d | 2013-10-16 13:30:34 +0200 | [diff] [blame] | 1526 | enum intel_pipe_crc_source source; |
Damien Lespiau | d538bbd | 2013-10-21 14:29:30 +0100 | [diff] [blame] | 1527 | int head, tail; |
Damien Lespiau | 0714442 | 2013-10-15 18:55:40 +0100 | [diff] [blame] | 1528 | wait_queue_head_t wq; |
Tomeu Vizoso | 8c6b709 | 2017-01-10 14:43:04 +0100 | [diff] [blame] | 1529 | int skipped; |
Shuang He | 8bf1e9f | 2013-10-15 18:55:27 +0100 | [diff] [blame] | 1530 | }; |
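| | |
| | /* |
| |  * Sketch of the ring-buffer arithmetic behind head/tail above, using |
| |  * the standard CIRC_* macros from <linux/circ_buf.h> as the IRQ and |
| |  * read paths do. INTEL_PIPE_CRC_ENTRIES_NR (128) must stay a power of |
| |  * two for these macros; callers are assumed to hold pipe_crc->lock. |
| |  */ |
| | static inline bool example_pipe_crc_can_store(const struct intel_pipe_crc *pipe_crc) |
| | { |
| | 	return CIRC_SPACE(pipe_crc->head, pipe_crc->tail, |
| | 			  INTEL_PIPE_CRC_ENTRIES_NR) >= 1; |
| | } |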
| 1531 | |
Daniel Vetter | f99d706 | 2014-06-19 16:01:59 +0200 | [diff] [blame] | 1532 | struct i915_frontbuffer_tracking { |
Chris Wilson | b5add95 | 2016-08-04 16:32:36 +0100 | [diff] [blame] | 1533 | spinlock_t lock; |
Daniel Vetter | f99d706 | 2014-06-19 16:01:59 +0200 | [diff] [blame] | 1534 | |
| 1535 | /* |
| 1536 | * Tracking bits for delayed frontbuffer flushing due to gpu activity or |
| 1537 | * scheduled flips. |
| 1538 | */ |
| 1539 | unsigned busy_bits; |
| 1540 | unsigned flip_bits; |
| 1541 | }; |
| 1542 | |
Mika Kuoppala | 7225342 | 2014-10-07 17:21:26 +0300 | [diff] [blame] | 1543 | struct i915_wa_reg { |
Ville Syrjälä | f0f59a0 | 2015-11-18 15:33:26 +0200 | [diff] [blame] | 1544 | i915_reg_t addr; |
Mika Kuoppala | 7225342 | 2014-10-07 17:21:26 +0300 | [diff] [blame] | 1545 | u32 value; |
| 1546 | /* bitmask representing WA bits */ |
| 1547 | u32 mask; |
| 1548 | }; |
| 1549 | |
Oscar Mateo | d6242ae | 2017-10-17 13:27:51 -0700 | [diff] [blame] | 1550 | #define I915_MAX_WA_REGS 16 |
Mika Kuoppala | 7225342 | 2014-10-07 17:21:26 +0300 | [diff] [blame] | 1551 | |
| 1552 | struct i915_workarounds { |
| 1553 | struct i915_wa_reg reg[I915_MAX_WA_REGS]; |
| 1554 | u32 count; |
Tvrtko Ursulin | 666796d | 2016-03-16 11:00:39 +0000 | [diff] [blame] | 1555 | u32 hw_whitelist_count[I915_NUM_ENGINES]; |
Mika Kuoppala | 7225342 | 2014-10-07 17:21:26 +0300 | [diff] [blame] | 1556 | }; |
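| | |
| | /* |
| |  * Hedged sketch of how entries land in the table above: the list is |
| |  * append-only and bounded by I915_MAX_WA_REGS, with 'mask' recording |
| |  * which bits of 'value' are significant when the workaround is later |
| |  * replayed. Modelled on the driver's wa_add() helper, with error |
| |  * handling reduced to a WARN. |
| |  */ |
| | static inline void example_wa_add(struct i915_workarounds *w, |
| | 				  i915_reg_t addr, u32 mask, u32 value) |
| | { |
| | 	if (WARN_ON(w->count >= I915_MAX_WA_REGS)) |
| | 		return; |
| | |
| | 	w->reg[w->count].addr = addr; |
| | 	w->reg[w->count].mask = mask; |
| | 	w->reg[w->count].value = value; |
| | 	w->count++; |
| | } |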
| 1557 | |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 1558 | struct i915_virtual_gpu { |
| 1559 | bool active; |
Tina Zhang | 8a4ab66 | 2017-08-14 15:20:46 +0800 | [diff] [blame] | 1560 | u32 caps; |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 1561 | }; |
| 1562 | |
Matt Roper | aa36313 | 2015-09-24 15:53:18 -0700 | [diff] [blame] | 1563 | /* used in computing the new watermarks state */ |
| 1564 | struct intel_wm_config { |
| 1565 | unsigned int num_pipes_active; |
| 1566 | bool sprites_enabled; |
| 1567 | bool sprites_scaled; |
| 1568 | }; |
| 1569 | |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1570 | struct i915_oa_format { |
| 1571 | u32 format; |
| 1572 | int size; |
| 1573 | }; |
| 1574 | |
Robert Bragg | 8a3003d | 2016-11-07 19:49:51 +0000 | [diff] [blame] | 1575 | struct i915_oa_reg { |
| 1576 | i915_reg_t addr; |
| 1577 | u32 value; |
| 1578 | }; |
| 1579 | |
Lionel Landwerlin | 701f823 | 2017-08-03 17:58:08 +0100 | [diff] [blame] | 1580 | struct i915_oa_config { |
| 1581 | char uuid[UUID_STRING_LEN + 1]; |
| 1582 | int id; |
| 1583 | |
| 1584 | const struct i915_oa_reg *mux_regs; |
| 1585 | u32 mux_regs_len; |
| 1586 | const struct i915_oa_reg *b_counter_regs; |
| 1587 | u32 b_counter_regs_len; |
| 1588 | const struct i915_oa_reg *flex_regs; |
| 1589 | u32 flex_regs_len; |
| 1590 | |
| 1591 | struct attribute_group sysfs_metric; |
| 1592 | struct attribute *attrs[2]; |
| 1593 | struct device_attribute sysfs_metric_id; |
Lionel Landwerlin | f89823c | 2017-08-03 18:05:50 +0100 | [diff] [blame] | 1594 | |
| 1595 | atomic_t ref_count; |
Lionel Landwerlin | 701f823 | 2017-08-03 17:58:08 +0100 | [diff] [blame] | 1596 | }; |
| 1597 | |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1598 | struct i915_perf_stream; |
| 1599 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1600 | /** |
| 1601 | * struct i915_perf_stream_ops - the OPs to support a specific stream type |
| 1602 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1603 | struct i915_perf_stream_ops { |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1604 | /** |
| 1605 | * @enable: Enables the collection of HW samples, either in response to |
| 1606 | * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened |
| 1607 | * without `I915_PERF_FLAG_DISABLED`. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1608 | */ |
| 1609 | void (*enable)(struct i915_perf_stream *stream); |
| 1610 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1611 | /** |
| 1612 | * @disable: Disables the collection of HW samples, either in response |
| 1613 | * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying |
| 1614 | * the stream. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1615 | */ |
| 1616 | void (*disable)(struct i915_perf_stream *stream); |
| 1617 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1618 | /** |
| 1619 | * @poll_wait: Call poll_wait, passing a wait queue that will be woken |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1620 | * once there is something ready to read() for the stream |
| 1621 | */ |
| 1622 | void (*poll_wait)(struct i915_perf_stream *stream, |
| 1623 | struct file *file, |
| 1624 | poll_table *wait); |
| 1625 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1626 | /** |
| 1627 | * @wait_unlocked: For handling a blocking read, wait until there is |
| 1628 | * something ready to read() for the stream. E.g. wait on the same |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1629 | * wait queue that would be passed to poll_wait(). |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1630 | */ |
| 1631 | int (*wait_unlocked)(struct i915_perf_stream *stream); |
| 1632 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1633 | /** |
| 1634 | * @read: Copy buffered metrics as records to userspace |
| 1635 | * **buf**: the userspace, destination buffer |
| 1636 | * **count**: the number of bytes to copy, requested by userspace |
| 1637 | * **offset**: zero at the start of the read, updated as the read |
| 1638 | * proceeds, it represents how many bytes have been copied so far and |
| 1639 | * the buffer offset for copying the next record. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1640 | * |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1641 | * Copy as many buffered i915 perf samples and records for this stream |
| 1642 | * to userspace as will fit in the given buffer. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1643 | * |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1644 | * Only write complete records; returning -%ENOSPC if there isn't room |
| 1645 | * for a complete record. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1646 | * |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1647 | * Return any error condition that results in a short read such as |
| 1648 | * -%ENOSPC or -%EFAULT, even though these may be squashed before |
| 1649 | * returning to userspace. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1650 | */ |
| 1651 | int (*read)(struct i915_perf_stream *stream, |
| 1652 | char __user *buf, |
| 1653 | size_t count, |
| 1654 | size_t *offset); |
| 1655 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1656 | /** |
| 1657 | * @destroy: Cleanup any stream specific resources. |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1658 | * |
| 1659 | * The stream will always be disabled before this is called. |
| 1660 | */ |
| 1661 | void (*destroy)(struct i915_perf_stream *stream); |
| 1662 | }; |
| 1663 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1664 | /** |
| 1665 | * struct i915_perf_stream - state for a single open stream FD |
| 1666 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1667 | struct i915_perf_stream { |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1668 | /** |
| 1669 | * @dev_priv: i915 drm device |
| 1670 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1671 | struct drm_i915_private *dev_priv; |
| 1672 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1673 | /** |
| 1674 | * @link: Links the stream into ``&drm_i915_private->streams`` |
| 1675 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1676 | struct list_head link; |
| 1677 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1678 | /** |
| 1679 | * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` |
| 1680 | * properties given when opening a stream, representing the contents |
| 1681 | * of a single sample as read() by userspace. |
| 1682 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1683 | u32 sample_flags; |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1684 | |
| 1685 | /** |
| 1686 | * @sample_size: Considering the configured contents of a sample |
| 1687 | * combined with the required header size, this is the total size |
| 1688 | * of a single sample record. |
| 1689 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1690 | int sample_size; |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1691 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1692 | /** |
| 1693 | * @ctx: %NULL if measuring system-wide across all contexts or a |
| 1694 | * specific context that is being monitored. |
| 1695 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1696 | struct i915_gem_context *ctx; |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1697 | |
| 1698 | /** |
| 1699 | * @enabled: Whether the stream is currently enabled, considering |
| 1700 | * whether the stream was opened in a disabled state and based |
| 1701 | * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. |
| 1702 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1703 | bool enabled; |
| 1704 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1705 | /** |
| 1706 | * @ops: The callbacks providing the implementation of this specific |
| 1707 | * type of configured stream. |
| 1708 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1709 | const struct i915_perf_stream_ops *ops; |
Lionel Landwerlin | 701f823 | 2017-08-03 17:58:08 +0100 | [diff] [blame] | 1710 | |
| 1711 | /** |
| 1712 | * @oa_config: The OA configuration used by the stream. |
| 1713 | */ |
| 1714 | struct i915_oa_config *oa_config; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1715 | }; |
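| | |
| | /* |
| |  * Sketch of the calling convention for the @read op documented above, |
| |  * as driven from the file read path: 'offset' starts at zero, the op |
| |  * advances it past each complete record copied, and a short read |
| |  * returns whatever was copied before the error. Loosely follows the |
| |  * shape of i915_perf_read() in i915_perf.c. |
| |  */ |
| | static inline ssize_t example_perf_stream_read(struct i915_perf_stream *stream, |
| | 					       char __user *buf, size_t count) |
| | { |
| | 	size_t offset = 0; |
| | 	int ret; |
| | |
| | 	ret = stream->ops->read(stream, buf, count, &offset); |
| | |
| | 	/* report bytes copied if any, otherwise propagate the error */ |
| | 	return offset ?: ret; |
| | } |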
| 1716 | |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1717 | /** |
| 1718 | * struct i915_oa_ops - Gen specific implementation of an OA unit stream |
| 1719 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1720 | struct i915_oa_ops { |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1721 | /** |
Lionel Landwerlin | f89823c | 2017-08-03 18:05:50 +0100 | [diff] [blame] | 1722 | * @is_valid_b_counter_reg: Validates register's address for |
| 1723 | * programming boolean counters for a particular platform. |
| 1724 | */ |
| 1725 | bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv, |
| 1726 | u32 addr); |
| 1727 | |
| 1728 | /** |
| 1729 | * @is_valid_mux_reg: Validates register's address for programming mux |
| 1730 | * for a particular platform. |
| 1731 | */ |
| 1732 | bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr); |
| 1733 | |
| 1734 | /** |
| 1735 | * @is_valid_flex_reg: Validates register's address for programming |
| 1736 | * flex EU filtering for a particular platform. |
| 1737 | */ |
| 1738 | bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); |
| 1739 | |
| 1740 | /** |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1741 | * @init_oa_buffer: Resets the head and tail pointers of the |
| 1742 | * circular buffer for periodic OA reports. |
| 1743 | * |
| 1744 | * Called when first opening a stream for OA metrics, but may also be |
| 1745 | * called in response to an OA buffer overflow or other error |
| 1746 | * condition. |
| 1747 | * |
| 1748 | * Note it may be necessary to clear the full OA buffer here as part of |
| 1749 | * maintaining the invariant that new reports must be written to |
| 1750 | * zeroed memory for us to be able to reliably detect if an expected |
| 1751 | * report has not yet landed in memory. (At least on Haswell the OA |
| 1752 | * buffer tail pointer is not synchronized with reports being visible |
| 1753 | * to the CPU) |
| 1754 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1755 | void (*init_oa_buffer)(struct drm_i915_private *dev_priv); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1756 | |
| 1757 | /** |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 1758 | * @enable_metric_set: Selects and applies any MUX configuration to set |
| 1759 | * up the Boolean and Custom (B/C) counters that are part of the |
| 1760 | * counter reports being sampled. May apply system constraints such as |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1761 | * disabling EU clock gating as required. |
| 1762 | */ |
Lionel Landwerlin | 701f823 | 2017-08-03 17:58:08 +0100 | [diff] [blame] | 1763 | int (*enable_metric_set)(struct drm_i915_private *dev_priv, |
| 1764 | const struct i915_oa_config *oa_config); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1765 | |
| 1766 | /** |
| 1767 | * @disable_metric_set: Remove system constraints associated with using |
| 1768 | * the OA unit. |
| 1769 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1770 | void (*disable_metric_set)(struct drm_i915_private *dev_priv); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1771 | |
| 1772 | /** |
| 1773 | * @oa_enable: Enable periodic sampling |
| 1774 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1775 | void (*oa_enable)(struct drm_i915_private *dev_priv); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1776 | |
| 1777 | /** |
| 1778 | * @oa_disable: Disable periodic sampling |
| 1779 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1780 | void (*oa_disable)(struct drm_i915_private *dev_priv); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1781 | |
| 1782 | /** |
| 1783 | * @read: Copy data from the circular OA buffer into a given userspace |
| 1784 | * buffer. |
| 1785 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 1786 | int (*read)(struct i915_perf_stream *stream, |
| 1787 | char __user *buf, |
| 1788 | size_t count, |
| 1789 | size_t *offset); |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1790 | |
| 1791 | /** |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 1792 | * @oa_hw_tail_read: read the OA tail pointer register |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1793 | * |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 1794 | * In particular this enables us to share all the fiddly code for |
| 1795 | * handling the OA unit tail pointer race that affects multiple |
| 1796 | * generations. |
Robert Bragg | 16d98b3 | 2016-12-07 21:40:33 +0000 | [diff] [blame] | 1797 | */ |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 1798 | u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv); |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 1799 | }; |
| 1800 | |
Ville Syrjälä | 49cd97a | 2017-02-07 20:33:45 +0200 | [diff] [blame] | 1801 | struct intel_cdclk_state { |
Imre Deak | b6c51c3 | 2018-01-17 19:25:08 +0200 | [diff] [blame] | 1802 | unsigned int cdclk, vco, ref, bypass; |
Ville Syrjälä | 64600bd | 2017-10-24 12:52:08 +0300 | [diff] [blame] | 1803 | u8 voltage_level; |
Ville Syrjälä | 49cd97a | 2017-02-07 20:33:45 +0200 | [diff] [blame] | 1804 | }; |
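| | |
| | /* |
| |  * Illustrative comparison for the cdclk state above: two states are |
| |  * taken as equivalent when every field matches. The driver draws finer |
| |  * distinctions (e.g. intel_cdclk_needs_modeset() in intel_cdclk.c |
| |  * ignores the voltage level); this sketch only shows the shape of the |
| |  * check. |
| |  */ |
| | static inline bool example_cdclk_state_equal(const struct intel_cdclk_state *a, |
| | 					     const struct intel_cdclk_state *b) |
| | { |
| | 	return a->cdclk == b->cdclk && a->vco == b->vco && |
| | 	       a->ref == b->ref && a->bypass == b->bypass && |
| | 	       a->voltage_level == b->voltage_level; |
| | } |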
| 1805 | |
Jani Nikula | 77fec55 | 2014-03-31 14:27:22 +0300 | [diff] [blame] | 1806 | struct drm_i915_private { |
Chris Wilson | 8f460e2 | 2016-06-24 14:00:18 +0100 | [diff] [blame] | 1807 | struct drm_device drm; |
| 1808 | |
Chris Wilson | efab6d8 | 2015-04-07 16:20:57 +0100 | [diff] [blame] | 1809 | struct kmem_cache *objects; |
Chris Wilson | e20d2ab | 2015-04-07 16:20:58 +0100 | [diff] [blame] | 1810 | struct kmem_cache *vmas; |
Chris Wilson | d1b48c1 | 2017-08-16 09:52:08 +0100 | [diff] [blame] | 1811 | struct kmem_cache *luts; |
Chris Wilson | efab6d8 | 2015-04-07 16:20:57 +0100 | [diff] [blame] | 1812 | struct kmem_cache *requests; |
Chris Wilson | 52e5420 | 2016-11-14 20:41:02 +0000 | [diff] [blame] | 1813 | struct kmem_cache *dependencies; |
Chris Wilson | c5cf9a9 | 2017-05-17 13:10:04 +0100 | [diff] [blame] | 1814 | struct kmem_cache *priorities; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1815 | |
Damien Lespiau | 5c969aa | 2014-02-07 19:12:48 +0000 | [diff] [blame] | 1816 | const struct intel_device_info info; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1817 | |
Matthew Auld | 7789422 | 2017-12-11 15:18:18 +0000 | [diff] [blame] | 1818 | /** |
| 1819 | * Data Stolen Memory - aka "i915 stolen memory" gives us the start and |
| 1820 | * end of stolen which we can optionally use to create GEM objects |
Matthew Auld | b1ace60 | 2017-12-11 15:18:21 +0000 | [diff] [blame] | 1821 | * backed by stolen memory. Note that stolen_usable_size tells us |
Matthew Auld | 7789422 | 2017-12-11 15:18:18 +0000 | [diff] [blame] | 1822 | * exactly how much of this we are actually allowed to use, given that |
| 1823 | * some portion of it is in fact reserved for use by hardware functions. |
| 1824 | */ |
| 1825 | struct resource dsm; |
Matthew Auld | 17a0534 | 2017-12-11 15:18:19 +0000 | [diff] [blame] | 1826 | /** |
| 1827 | * Reseved portion of Data Stolen Memory |
| 1828 | */ |
| 1829 | struct resource dsm_reserved; |
Matthew Auld | 7789422 | 2017-12-11 15:18:18 +0000 | [diff] [blame] | 1830 | |
Matthew Auld | b1ace60 | 2017-12-11 15:18:21 +0000 | [diff] [blame] | 1831 | /* |
| 1832 | * Stolen memory is segmented in hardware with different portions |
| 1833 | * off-limits to certain functions. |
| 1834 | * |
| 1835 | * The drm_mm is initialised to the total accessible range, as found |
| 1836 | * from the PCI config. On Broadwell+, this is further restricted to |
| 1837 | * avoid the first page! The upper end of stolen memory is reserved for |
| 1838 | * hardware functions and similarly removed from the accessible range. |
| 1839 | */ |
Matthew Auld | b7128ef | 2017-12-11 15:18:22 +0000 | [diff] [blame] | 1840 | resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ |
Matthew Auld | b1ace60 | 2017-12-11 15:18:21 +0000 | [diff] [blame] | 1841 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1842 | void __iomem *regs; |
| 1843 | |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 1844 | struct intel_uncore uncore; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1845 | |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 1846 | struct i915_virtual_gpu vgpu; |
| 1847 | |
Zhenyu Wang | feddf6e | 2016-10-20 17:15:03 +0800 | [diff] [blame] | 1848 | struct intel_gvt *gvt; |
Zhi Wang | 0ad35fe | 2016-06-16 08:07:00 -0400 | [diff] [blame] | 1849 | |
Anusha Srivatsa | bd13285 | 2017-01-18 08:05:53 -0800 | [diff] [blame] | 1850 | struct intel_huc huc; |
Alex Dai | 33a732f | 2015-08-12 15:43:36 +0100 | [diff] [blame] | 1851 | struct intel_guc guc; |
| 1852 | |
Daniel Vetter | eb80562 | 2015-05-04 14:58:44 +0200 | [diff] [blame] | 1853 | struct intel_csr csr; |
| 1854 | |
Jani Nikula | 5ea6e5e | 2015-04-01 10:55:04 +0300 | [diff] [blame] | 1855 | struct intel_gmbus gmbus[GMBUS_NUM_PINS]; |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1856 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1857 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus |
| 1858 | * controller on different i2c buses. */ |
| 1859 | struct mutex gmbus_mutex; |
| 1860 | |
| 1861 | /** |
| 1862 | * Base address of the gmbus and gpio block. |
| 1863 | */ |
| 1864 | uint32_t gpio_mmio_base; |
| 1865 | |
Shashank Sharma | b6fdd0f | 2014-05-19 20:54:03 +0530 | [diff] [blame] | 1866 | /* MMIO base address for MIPI regs */ |
| 1867 | uint32_t mipi_mmio_base; |
| 1868 | |
Ville Syrjälä | 443a389 | 2015-11-11 20:34:15 +0200 | [diff] [blame] | 1869 | uint32_t psr_mmio_base; |
| 1870 | |
Imre Deak | 44cb734 | 2016-08-10 14:07:29 +0300 | [diff] [blame] | 1871 | uint32_t pps_mmio_base; |
| 1872 | |
Daniel Vetter | 28c70f1 | 2012-12-01 13:53:45 +0100 | [diff] [blame] | 1873 | wait_queue_head_t gmbus_wait_queue; |
| 1874 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1875 | struct pci_dev *bridge_dev; |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 1876 | struct intel_engine_cs *engine[I915_NUM_ENGINES]; |
Chris Wilson | e7af311 | 2017-10-03 21:34:48 +0100 | [diff] [blame] | 1877 | /* Context used internally to idle the GPU and set up initial state */ |
| 1878 | struct i915_gem_context *kernel_context; |
| 1879 | /* Context only to be used for injecting preemption commands */ |
| 1880 | struct i915_gem_context *preempt_context; |
Tvrtko Ursulin | b46a33e | 2017-11-21 18:18:45 +0000 | [diff] [blame] | 1881 | struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] |
| 1882 | [MAX_ENGINE_INSTANCE + 1]; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1883 | |
Daniel Vetter | ba8286f | 2014-09-11 07:43:25 +0200 | [diff] [blame] | 1884 | struct drm_dma_handle *status_page_dmah; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1885 | struct resource mch_res; |
| 1886 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1887 | /* protects the irq masks */ |
| 1888 | spinlock_t irq_lock; |
| 1889 | |
Imre Deak | f8b79e5 | 2014-03-04 19:23:07 +0200 | [diff] [blame] | 1890 | bool display_irqs_enabled; |
| 1891 | |
Daniel Vetter | 9ee32fea | 2012-12-01 13:53:48 +0100 | [diff] [blame] | 1892 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
| 1893 | struct pm_qos_request pm_qos; |
| 1894 | |
Ville Syrjälä | a580516 | 2015-05-26 20:42:30 +0300 | [diff] [blame] | 1895 | /* Sideband mailbox protection */ |
| 1896 | struct mutex sb_lock; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1897 | |
| 1898 | /** Cached value of IMR to avoid reads in updating the bitfield */ |
Ben Widawsky | abd58f0 | 2013-11-02 21:07:09 -0700 | [diff] [blame] | 1899 | union { |
| 1900 | u32 irq_mask; |
| 1901 | u32 de_irq_mask[I915_MAX_PIPES]; |
| 1902 | }; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1903 | u32 gt_irq_mask; |
Akash Goel | f4e9af4 | 2016-10-12 21:54:30 +0530 | [diff] [blame] | 1904 | u32 pm_imr; |
| 1905 | u32 pm_ier; |
Deepak S | a6706b4 | 2014-03-15 20:23:22 +0530 | [diff] [blame] | 1906 | u32 pm_rps_events; |
Sagar Arun Kamble | 26705e2 | 2016-10-12 21:54:31 +0530 | [diff] [blame] | 1907 | u32 pm_guc_events; |
Imre Deak | 91d181d | 2014-02-10 18:42:49 +0200 | [diff] [blame] | 1908 | u32 pipestat_irq_mask[I915_MAX_PIPES]; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1909 | |
Jani Nikula | 5fcece8 | 2015-05-27 15:03:42 +0300 | [diff] [blame] | 1910 | struct i915_hotplug hotplug; |
Paulo Zanoni | ab34a7e | 2016-01-11 17:44:36 -0200 | [diff] [blame] | 1911 | struct intel_fbc fbc; |
Pradeep Bhat | 439d7ac | 2014-04-05 12:13:28 +0530 | [diff] [blame] | 1912 | struct i915_drrs drrs; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1913 | struct intel_opregion opregion; |
Rodrigo Vivi | 41aa344 | 2013-05-09 20:03:18 -0300 | [diff] [blame] | 1914 | struct intel_vbt_data vbt; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1915 | |
Jesse Barnes | d9ceb81 | 2014-10-09 12:57:43 -0700 | [diff] [blame] | 1916 | bool preserve_bios_swizzle; |
| 1917 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1918 | /* overlay */ |
| 1919 | struct intel_overlay *overlay; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1920 | |
Jani Nikula | 58c6877 | 2013-11-08 16:48:54 +0200 | [diff] [blame] | 1921 | /* backlight registers and fields in struct intel_panel */ |
Daniel Vetter | 07f11d4 | 2014-09-15 14:35:09 +0200 | [diff] [blame] | 1922 | struct mutex backlight_lock; |
Jani Nikula | 31ad8ec | 2013-04-02 15:48:09 +0300 | [diff] [blame] | 1923 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1924 | /* LVDS info */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1925 | bool no_aux_handshake; |
| 1926 | |
Ville Syrjälä | e39b999 | 2014-09-04 14:53:14 +0300 | [diff] [blame] | 1927 | /* protects panel power sequencer state */ |
| 1928 | struct mutex pps_mutex; |
| 1929 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1930 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1931 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
| 1932 | |
| 1933 | unsigned int fsb_freq, mem_freq, is_ddr3; |
Ville Syrjälä | b204535 | 2016-05-13 23:41:27 +0300 | [diff] [blame] | 1934 | unsigned int skl_preferred_vco_freq; |
Ville Syrjälä | 49cd97a | 2017-02-07 20:33:45 +0200 | [diff] [blame] | 1935 | unsigned int max_cdclk_freq; |
Ville Syrjälä | 8d96561 | 2016-11-14 18:35:10 +0200 | [diff] [blame] | 1936 | |
Mika Kahola | adafdc6 | 2015-08-18 14:36:59 +0300 | [diff] [blame] | 1937 | unsigned int max_dotclk_freq; |
Ville Syrjälä | e7dc33f | 2016-03-02 17:22:13 +0200 | [diff] [blame] | 1938 | unsigned int rawclk_freq; |
Ville Syrjälä | 6bcda4f | 2014-10-07 17:41:22 +0300 | [diff] [blame] | 1939 | unsigned int hpll_freq; |
Chris Wilson | 58ecd9d | 2017-11-05 13:49:05 +0000 | [diff] [blame] | 1940 | unsigned int fdi_pll_freq; |
Ville Syrjälä | bfa7df0 | 2015-09-24 23:29:18 +0300 | [diff] [blame] | 1941 | unsigned int czclk_freq; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1942 | |
Ville Syrjälä | 63911d7 | 2016-05-13 23:41:32 +0300 | [diff] [blame] | 1943 | struct { |
Ville Syrjälä | bb0f4aa | 2017-01-20 20:21:59 +0200 | [diff] [blame] | 1944 | /* |
| 1945 | * The current logical cdclk state. |
| 1946 | * See intel_atomic_state.cdclk.logical |
| 1947 | * |
| 1948 | * For reading, holding any crtc lock is sufficient; |
| 1949 | * for writing, all of them must be held. |
| 1950 | */ |
| 1951 | struct intel_cdclk_state logical; |
| 1952 | /* |
| 1953 | * The current actual cdclk state. |
| 1954 | * See intel_atomic_state.cdclk.actual |
| 1955 | */ |
| 1956 | struct intel_cdclk_state actual; |
| 1957 | /* The current hardware cdclk state */ |
Ville Syrjälä | 49cd97a | 2017-02-07 20:33:45 +0200 | [diff] [blame] | 1958 | struct intel_cdclk_state hw; |
| 1959 | } cdclk; |
Ville Syrjälä | 63911d7 | 2016-05-13 23:41:32 +0300 | [diff] [blame] | 1960 | |
Daniel Vetter | 645416f | 2013-09-02 16:22:25 +0200 | [diff] [blame] | 1961 | /** |
| 1962 | * wq - Driver workqueue for GEM. |
| 1963 | * |
| 1964 | * NOTE: Work items scheduled here are not allowed to grab any modeset |
| 1965 | * locks, for otherwise the flushing done in the pageflip code will |
| 1966 | * result in deadlocks. |
| 1967 | */ |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1968 | struct workqueue_struct *wq; |
| 1969 | |
Ville Syrjälä | 757fffc | 2017-11-13 15:36:22 +0200 | [diff] [blame] | 1970 | /* ordered wq for modesets */ |
| 1971 | struct workqueue_struct *modeset_wq; |
| 1972 | |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1973 | /* Display functions */ |
| 1974 | struct drm_i915_display_funcs display; |
| 1975 | |
| 1976 | /* PCH chipset type */ |
| 1977 | enum intel_pch pch_type; |
Paulo Zanoni | 17a303e | 2012-11-20 15:12:07 -0200 | [diff] [blame] | 1978 | unsigned short pch_id; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 1979 | |
| 1980 | unsigned long quirks; |
| 1981 | |
Zhang Rui | b8efb17 | 2013-02-05 15:41:53 +0800 | [diff] [blame] | 1982 | enum modeset_restore modeset_restore; |
| 1983 | struct mutex modeset_restore_lock; |
Maarten Lankhorst | e2c8b87 | 2016-02-16 10:06:14 +0100 | [diff] [blame] | 1984 | struct drm_atomic_state *modeset_restore_state; |
Maarten Lankhorst | 7397489 | 2016-08-05 23:28:27 +0300 | [diff] [blame] | 1985 | struct drm_modeset_acquire_ctx reset_ctx; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1986 | |
Ben Widawsky | a7bbbd6 | 2013-07-16 16:50:07 -0700 | [diff] [blame] | 1987 | struct list_head vm_list; /* Global list of all address spaces */ |
Joonas Lahtinen | 62106b4 | 2016-03-18 10:42:57 +0200 | [diff] [blame] | 1988 | struct i915_ggtt ggtt; /* VM representing the global address space */ |
Ben Widawsky | 5d4545a | 2013-01-17 12:45:15 -0800 | [diff] [blame] | 1989 | |
Daniel Vetter | 4b5aed6 | 2012-11-14 17:14:03 +0100 | [diff] [blame] | 1990 | struct i915_gem_mm mm; |
Chris Wilson | ad46cb5 | 2014-08-07 14:20:40 +0100 | [diff] [blame] | 1991 | DECLARE_HASHTABLE(mm_structs, 7); |
| 1992 | struct mutex mm_lock; |
Daniel Vetter | 8781342 | 2012-05-02 11:49:32 +0200 | [diff] [blame] | 1993 | |
Zhi Wang | 4395890 | 2017-09-14 20:39:40 +0800 | [diff] [blame] | 1994 | struct intel_ppat ppat; |
| 1995 | |
Daniel Vetter | 8781342 | 2012-05-02 11:49:32 +0200 | [diff] [blame] | 1996 | /* Kernel Modesetting */ |
| 1997 | |
Ville Syrjälä | e2af48c | 2016-10-31 22:37:05 +0200 | [diff] [blame] | 1998 | struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; |
| 1999 | struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; |
Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 2000 | |
Daniel Vetter | c459787 | 2013-10-21 21:04:07 +0200 | [diff] [blame] | 2001 | #ifdef CONFIG_DEBUG_FS |
| 2002 | struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; |
| 2003 | #endif |
| 2004 | |
Maarten Lankhorst | 565602d | 2015-12-10 12:33:57 +0100 | [diff] [blame] | 2005 | /* dpll and cdclk state is protected by connection_mutex */ |
Daniel Vetter | e72f9fb | 2013-06-05 13:34:06 +0200 | [diff] [blame] | 2006 | int num_shared_dpll; |
| 2007 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; |
Ander Conselvan de Oliveira | f9476a6 | 2016-03-08 17:46:22 +0200 | [diff] [blame] | 2008 | const struct intel_dpll_mgr *dpll_mgr; |
Maarten Lankhorst | 565602d | 2015-12-10 12:33:57 +0100 | [diff] [blame] | 2009 | |
Maarten Lankhorst | fbf6d87 | 2016-03-23 14:51:12 +0100 | [diff] [blame] | 2010 | /* |
| 2011 | * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. |
| 2012 | * Must be global rather than per dpll, because on some platforms |
| 2013 | * plls share registers. |
| 2014 | */ |
| 2015 | struct mutex dpll_lock; |
| 2016 | |
Maarten Lankhorst | 565602d | 2015-12-10 12:33:57 +0100 | [diff] [blame] | 2017 | unsigned int active_crtcs; |
Ville Syrjälä | d305e06 | 2017-08-30 21:57:03 +0300 | [diff] [blame] | 2018 | /* minimum acceptable cdclk for each pipe */ |
| 2019 | int min_cdclk[I915_MAX_PIPES]; |
Ville Syrjälä | 53e9bf5 | 2017-10-24 12:52:14 +0300 | [diff] [blame] | 2020 | /* minimum acceptable voltage level for each pipe */ |
| 2021 | u8 min_voltage_level[I915_MAX_PIPES]; |
Maarten Lankhorst | 565602d | 2015-12-10 12:33:57 +0100 | [diff] [blame] | 2022 | |
Chon Ming Lee | e4607fc | 2013-11-06 14:36:35 +0800 | [diff] [blame] | 2023 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; |
Jesse Barnes | ee7b9f9 | 2012-04-20 17:11:53 +0100 | [diff] [blame] | 2024 | |
Mika Kuoppala | 7225342 | 2014-10-07 17:21:26 +0300 | [diff] [blame] | 2025 | struct i915_workarounds workarounds; |
Arun Siluvery | 888b599 | 2014-08-26 14:44:51 +0100 | [diff] [blame] | 2026 | |
Daniel Vetter | f99d706 | 2014-06-19 16:01:59 +0200 | [diff] [blame] | 2027 | struct i915_frontbuffer_tracking fb_tracking; |
| 2028 | |
Chris Wilson | eb955ee | 2017-01-23 21:29:39 +0000 | [diff] [blame] | 2029 | struct intel_atomic_helper { |
| 2030 | struct llist_head free_list; |
| 2031 | struct work_struct free_work; |
| 2032 | } atomic_helper; |
| 2033 | |
Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 2034 | u16 orig_clock; |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 2035 | |
Zhenyu Wang | c4804411 | 2009-12-17 14:48:43 +0800 | [diff] [blame] | 2036 | bool mchbar_need_disable; |
Jesse Barnes | f97108d | 2010-01-29 11:27:07 -0800 | [diff] [blame] | 2037 | |
Daniel Vetter | a4da4fa | 2012-11-02 19:55:07 +0100 | [diff] [blame] | 2038 | struct intel_l3_parity l3_parity; |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 2039 | |
Ben Widawsky | 5912450 | 2013-07-04 11:02:05 -0700 | [diff] [blame] | 2040 | /* Cannot be determined by PCIID. You must always read a register. */ |
Mika Kuoppala | 3accaf7 | 2016-04-13 17:26:43 +0300 | [diff] [blame] | 2041 | u32 edram_cap; |
Ben Widawsky | 5912450 | 2013-07-04 11:02:05 -0700 | [diff] [blame] | 2042 | |
Sagar Arun Kamble | 9f81750 | 2017-10-10 22:30:05 +0100 | [diff] [blame] | 2043 | /* |
| 2044 | * Protects RPS/RC6 register access and PCU communication. |
| 2045 | * Must be taken after struct_mutex if nested. Note that |
| 2046 | * this lock may be held for long periods of time when |
| 2047 | * talking to hw - so only take it when talking to hw! |
| 2048 | */ |
| 2049 | struct mutex pcu_lock; |
| 2050 | |
Sagar Arun Kamble | 562d9ba | 2017-10-10 22:30:06 +0100 | [diff] [blame] | 2051 | /* gen6+ GT PM state */ |
| 2052 | struct intel_gen6_power_mgmt gt_pm; |
Daniel Vetter | c6a828d | 2012-08-08 23:35:35 +0200 | [diff] [blame] | 2053 | |
Daniel Vetter | 20e4d40 | 2012-08-08 23:35:39 +0200 | [diff] [blame] | 2054 | /* ilk-only ips/rps state. Everything in here is protected by the global |
| 2055 | * mchdev_lock in intel_pm.c */ |
Daniel Vetter | c85aa88 | 2012-11-02 19:55:03 +0100 | [diff] [blame] | 2056 | struct intel_ilk_power_mgmt ips; |
Jesse Barnes | b5e50c3 | 2010-02-05 12:42:41 -0800 | [diff] [blame] | 2057 | |
Imre Deak | 83c00f5 | 2013-10-25 17:36:47 +0300 | [diff] [blame] | 2058 | struct i915_power_domains power_domains; |
Wang Xingchao | a38911a | 2013-05-30 22:07:11 +0800 | [diff] [blame] | 2059 | |
Rodrigo Vivi | a031d70 | 2013-10-03 16:15:06 -0300 | [diff] [blame] | 2060 | struct i915_psr psr; |
Rodrigo Vivi | 3f51e47 | 2013-07-11 18:45:00 -0300 | [diff] [blame] | 2061 | |
Daniel Vetter | 99584db | 2012-11-14 17:14:04 +0100 | [diff] [blame] | 2062 | struct i915_gpu_error gpu_error; |
Chris Wilson | ae681d9 | 2010-10-01 14:57:56 +0100 | [diff] [blame] | 2063 | |
Jesse Barnes | c9cddff | 2013-05-08 10:45:13 -0700 | [diff] [blame] | 2064 | struct drm_i915_gem_object *vlv_pctx; |
| 2065 | |
Dave Airlie | 8be48d9 | 2010-03-30 05:34:14 +0000 | [diff] [blame] | 2066 | /* list of fbdevs registered on this device */ |
| 2067 | struct intel_fbdev *fbdev; |
Chris Wilson | 82e3b8c | 2014-08-13 13:09:46 +0100 | [diff] [blame] | 2068 | struct work_struct fbdev_suspend_work; |
Chris Wilson | e953fd7 | 2011-02-21 22:23:52 +0000 | [diff] [blame] | 2069 | |
| 2070 | struct drm_property *broadcast_rgb_property; |
Chris Wilson | 3f43c48 | 2011-05-12 22:17:24 +0100 | [diff] [blame] | 2071 | struct drm_property *force_audio_property; |
Ben Widawsky | e368919 | 2012-05-25 16:56:22 -0700 | [diff] [blame] | 2072 | |
Imre Deak | 58fddc2 | 2015-01-08 17:54:14 +0200 | [diff] [blame] | 2073 | /* hda/i915 audio component */ |
David Henningsson | 51e1d83 | 2015-08-19 10:48:56 +0200 | [diff] [blame] | 2074 | struct i915_audio_component *audio_component; |
Imre Deak | 58fddc2 | 2015-01-08 17:54:14 +0200 | [diff] [blame] | 2075 | bool audio_component_registered; |
Libin Yang | 4a21ef7 | 2015-09-02 14:11:39 +0800 | [diff] [blame] | 2076 | /** |
| 2077 | * av_mutex - mutex for audio/video sync |
| 2079 | */ |
| 2080 | struct mutex av_mutex; |
Imre Deak | 58fddc2 | 2015-01-08 17:54:14 +0200 | [diff] [blame] | 2081 | |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 2082 | struct { |
| 2083 | struct list_head list; |
Chris Wilson | 5f09a9c | 2017-06-20 12:05:46 +0100 | [diff] [blame] | 2084 | struct llist_head free_list; |
| 2085 | struct work_struct free_work; |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 2086 | |
| 2087 | /* The hw wants to have a stable context identifier for the |
| 2088 | * lifetime of the context (for OA, PASID, faults, etc). |
| 2089 | * This is limited in execlists to 21 bits. |
| 2090 | */ |
| 2091 | struct ida hw_ida; |
| 2092 | #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ |
| 2093 | } contexts; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 2094 | |
Damien Lespiau | 3e68320 | 2012-12-11 18:48:29 +0000 | [diff] [blame] | 2095 | u32 fdi_rx_config; |
Paulo Zanoni | 68d18ad | 2012-12-01 12:04:26 -0200 | [diff] [blame] | 2096 | |
Ville Syrjälä | c231775 | 2016-03-15 16:39:56 +0200 | [diff] [blame] | 2097 | /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ |
Ville Syrjälä | 7072246 | 2015-04-10 18:21:28 +0300 | [diff] [blame] | 2098 | u32 chv_phy_control; |
Ville Syrjälä | c231775 | 2016-03-15 16:39:56 +0200 | [diff] [blame] | 2099 | /* |
| 2100 | * Shadows for CHV DPLL_MD regs to keep the state |
| 2101 | * checker somewhat working in the presence of hardware |
| 2102 | * crappiness (can't read out DPLL_MD for pipes B & C). |
| 2103 | */ |
| 2104 | u32 chv_dpll_md[I915_MAX_PIPES]; |
Imre Deak | adc7f04 | 2016-04-04 17:27:10 +0300 | [diff] [blame] | 2105 | u32 bxt_phy_grc; |
Ville Syrjälä | 7072246 | 2015-04-10 18:21:28 +0300 | [diff] [blame] | 2106 | |
Daniel Vetter | 842f1c8 | 2014-03-10 10:01:44 +0100 | [diff] [blame] | 2107 | u32 suspend_count; |
Imre Deak | bc87229 | 2015-11-18 17:32:30 +0200 | [diff] [blame] | 2108 | bool suspended_to_idle; |
Daniel Vetter | f4c956a | 2012-11-02 19:55:02 +0100 | [diff] [blame] | 2109 | struct i915_suspend_saved_registers regfile; |
Imre Deak | ddeea5b | 2014-05-05 15:19:56 +0300 | [diff] [blame] | 2110 | struct vlv_s0ix_state vlv_s0ix_state; |
Daniel Vetter | 231f42a | 2012-11-02 19:55:05 +0100 | [diff] [blame] | 2111 | |
Lyude | 656d1b8 | 2016-08-17 15:55:54 -0400 | [diff] [blame] | 2112 | enum { |
Paulo Zanoni | 16dcdc4 | 2016-09-22 18:00:27 -0300 | [diff] [blame] | 2113 | I915_SAGV_UNKNOWN = 0, |
| 2114 | I915_SAGV_DISABLED, |
| 2115 | I915_SAGV_ENABLED, |
| 2116 | I915_SAGV_NOT_CONTROLLED |
| 2117 | } sagv_status; |
Lyude | 656d1b8 | 2016-08-17 15:55:54 -0400 | [diff] [blame] | 2118 | |
Ville Syrjälä | 53615a5 | 2013-08-01 16:18:50 +0300 | [diff] [blame] | 2119 | struct { |
| 2120 | /* |
| 2121 | * Raw watermark latency values: |
| 2122 | * in 0.1us units for WM0, |
| 2123 | * in 0.5us units for WM1+. |
| 2124 | */ |
| 2125 | /* primary */ |
| 2126 | uint16_t pri_latency[5]; |
| 2127 | /* sprite */ |
| 2128 | uint16_t spr_latency[5]; |
| 2129 | /* cursor */ |
| 2130 | uint16_t cur_latency[5]; |
Pradeep Bhat | 2af30a5 | 2014-11-04 17:06:38 +0000 | [diff] [blame] | 2131 | /* |
| 2132 | * Raw watermark memory latency values |
| 2133 | * for SKL for all 8 levels |
| 2134 | * in 1us units. |
| 2135 | */ |
| 2136 | uint16_t skl_latency[8]; |
Ville Syrjälä | 609cede | 2013-10-09 19:18:03 +0300 | [diff] [blame] | 2137 | |
| 2138 | /* current hardware state */ |
Pradeep Bhat | 2d41c0b | 2014-11-04 17:06:42 +0000 | [diff] [blame] | 2139 | union { |
| 2140 | struct ilk_wm_values hw; |
| 2141 | struct skl_wm_values skl_hw; |
Ville Syrjälä | 0018fda | 2015-03-05 21:19:45 +0200 | [diff] [blame] | 2142 | struct vlv_wm_values vlv; |
Ville Syrjälä | 04548cb | 2017-04-21 21:14:29 +0300 | [diff] [blame] | 2143 | struct g4x_wm_values g4x; |
Pradeep Bhat | 2d41c0b | 2014-11-04 17:06:42 +0000 | [diff] [blame] | 2144 | }; |
Ville Syrjälä | 58590c1 | 2015-09-08 21:05:12 +0300 | [diff] [blame] | 2145 | |
| 2146 | uint8_t max_level; |
Matt Roper | ed4a6a7 | 2016-02-23 17:20:13 -0800 | [diff] [blame] | 2147 | |
| 2148 | /* |
| 2149 | * Should be held around atomic WM register writing; also |
| 2150 | * protects intel_crtc->wm.active and |
| 2151 | * cstate->wm.need_postvbl_update. |
| 2152 | */ |
| 2153 | struct mutex wm_mutex; |
Matt Roper | 279e99d | 2016-05-12 07:06:02 -0700 | [diff] [blame] | 2154 | |
| 2155 | /* |
| 2156 | * Set during HW readout of watermarks/DDB. Some platforms |
| 2157 | * need to know when we're still using BIOS-provided values |
| 2158 | * (which we don't fully trust). |
| 2159 | */ |
| 2160 | bool distrust_bios_wm; |
Ville Syrjälä | 53615a5 | 2013-08-01 16:18:50 +0300 | [diff] [blame] | 2161 | } wm; |
| 2162 | |
Sagar Arun Kamble | ad1443f | 2017-10-10 22:30:04 +0100 | [diff] [blame] | 2163 | struct i915_runtime_pm runtime_pm; |
Paulo Zanoni | 8a18745 | 2013-12-06 20:32:13 -0200 | [diff] [blame] | 2164 | |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 2165 | struct { |
| 2166 | bool initialized; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2167 | |
Robert Bragg | 442b8c0 | 2016-11-07 19:49:53 +0000 | [diff] [blame] | 2168 | struct kobject *metrics_kobj; |
Robert Bragg | ccdf634 | 2016-11-07 19:49:54 +0000 | [diff] [blame] | 2169 | struct ctl_table_header *sysctl_header; |
Robert Bragg | 442b8c0 | 2016-11-07 19:49:53 +0000 | [diff] [blame] | 2170 | |
Lionel Landwerlin | f89823c | 2017-08-03 18:05:50 +0100 | [diff] [blame] | 2171 | /* |
| 2172 | * Lock associated with adding/modifying/removing OA configs |
| 2173 | * in dev_priv->perf.metrics_idr. |
| 2174 | */ |
| 2175 | struct mutex metrics_lock; |
| 2176 | |
| 2177 | /* |
| 2178 | * List of dynamic configurations; you need to hold |
| 2179 | * dev_priv->perf.metrics_lock to access it. |
| 2180 | */ |
| 2181 | struct idr metrics_idr; |
| 2182 | |
| 2183 | /* |
| 2184 | * Lock associated with anything below within this structure |
| 2185 | * except exclusive_stream. |
| 2186 | */ |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 2187 | struct mutex lock; |
| 2188 | struct list_head streams; |
Robert Bragg | 8a3003d | 2016-11-07 19:49:51 +0000 | [diff] [blame] | 2189 | |
| 2190 | struct { |
Lionel Landwerlin | f89823c | 2017-08-03 18:05:50 +0100 | [diff] [blame] | 2191 | /* |
| 2192 | * The stream currently using the OA unit. If accessed |
| 2193 | * outside a syscall associated with its file |
| 2194 | * descriptor, you need to hold |
| 2195 | * dev_priv->drm.struct_mutex. |
| 2196 | */ |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2197 | struct i915_perf_stream *exclusive_stream; |
| 2198 | |
| 2199 | u32 specific_ctx_id; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2200 | |
| 2201 | struct hrtimer poll_check_timer; |
| 2202 | wait_queue_head_t poll_wq; |
| 2203 | bool pollin; |
| 2204 | |
Robert Bragg | 712122e | 2017-05-11 16:43:31 +0100 | [diff] [blame] | 2205 | /** |
| 2206 | * For rate-limiting any notifications of spurious |
| 2207 | * invalid OA reports |
| 2208 | */ |
| 2209 | struct ratelimit_state spurious_report_rs; |
| 2210 | |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2211 | bool periodic; |
| 2212 | int period_exponent; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2213 | |
Lionel Landwerlin | 701f823 | 2017-08-03 17:58:08 +0100 | [diff] [blame] | 2214 | struct i915_oa_config test_config; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2215 | |
| 2216 | struct { |
| 2217 | struct i915_vma *vma; |
| 2218 | u8 *vaddr; |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 2219 | u32 last_ctx_id; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2220 | int format; |
| 2221 | int format_size; |
Robert Bragg | f279020 | 2017-05-11 16:43:26 +0100 | [diff] [blame] | 2222 | |
| 2223 | /** |
Robert Bragg | 0dd860c | 2017-05-11 16:43:28 +0100 | [diff] [blame] | 2224 | * Locks reads and writes to all head/tail state |
| 2225 | * |
| 2226 | * Consider: the head and tail pointer state |
| 2227 | * needs to be read consistently from a hrtimer |
| 2228 | * callback (atomic context) and read() fop |
| 2229 | * (user context) with tail pointer updates |
| 2230 | * happening in atomic context and head updates |
| 2231 | * in user context and the (unlikely) |
| 2232 | * possibility of read() errors needing to |
| 2233 | * reset all head/tail state. |
| 2234 | * |
| 2235 | * Note: Contention or performance aren't |
| 2236 | * currently a significant concern here |
| 2237 | * considering the relatively low frequency of |
| 2238 | * hrtimer callbacks (5ms period) and that |
| 2239 | * reads typically only happen in response to a |
| 2240 | * hrtimer event and likely complete before the |
| 2241 | * next callback. |
| 2242 | * |
| 2243 | * Note: This lock is not held *while* reading |
| 2244 | * and copying data to userspace, so the value |
| 2245 | * of head observed in hrtimer callbacks won't |
| 2246 | * represent any partial consumption of data. |
| 2247 | */ |
| 2248 | spinlock_t ptr_lock; |
| 2249 | |
| 2250 | /** |
| 2251 | * One 'aging' tail pointer and one 'aged' |
| 2252 | * tail pointer ready to be used for reading. |
| 2253 | * |
| 2254 | * Initial values of 0xffffffff are invalid |
| 2255 | * and imply that an update is required |
| 2256 | * (and should be ignored by an attempted |
| 2257 | * read) |
| 2258 | */ |
| 2259 | struct { |
| 2260 | u32 offset; |
| 2261 | } tails[2]; |
| 2262 | |
| 2263 | /** |
| 2264 | * Index for the aged tail ready to read() |
| 2265 | * data up to. |
| 2266 | */ |
| 2267 | unsigned int aged_tail_idx; |
| 2268 | |
| 2269 | /** |
| 2270 | * A monotonic timestamp for when the current |
| 2271 | * aging tail pointer was read; used to |
| 2272 | * determine when it is old enough to trust. |
| 2273 | */ |
| 2274 | u64 aging_timestamp; |
| 2275 | |
| 2276 | /** |
Robert Bragg | f279020 | 2017-05-11 16:43:26 +0100 | [diff] [blame] | 2277 | * Although we can always read back the head |
| 2278 | * pointer register, we prefer to avoid |
| 2279 | * trusting the HW state, just to avoid any |
| 2280 | * risk that some hardware condition could |
| 2281 | * somehow bump the head pointer unpredictably |
| 2282 | * and cause us to forward the wrong OA buffer |
| 2283 | * data to userspace. |
| 2284 | */ |
| 2285 | u32 head; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2286 | } oa_buffer; |
| 2287 | |
| 2288 | u32 gen7_latched_oastatus1; |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 2289 | u32 ctx_oactxctrl_offset; |
| 2290 | u32 ctx_flexeu0_offset; |
| 2291 | |
| 2292 | /** |
| 2293 | * The RPT_ID/reason field for Gen8+ includes a bit |
| 2294 | * to determine if the CTX ID in the report is valid |
| 2295 | * but the specific bit differs between Gen 8 and 9 |
| 2296 | */ |
| 2297 | u32 gen8_valid_ctx_bit; |
Robert Bragg | d796515 | 2016-11-07 19:49:52 +0000 | [diff] [blame] | 2298 | |
| 2299 | struct i915_oa_ops ops; |
| 2300 | const struct i915_oa_format *oa_formats; |
Robert Bragg | 8a3003d | 2016-11-07 19:49:51 +0000 | [diff] [blame] | 2301 | } oa; |
Robert Bragg | eec688e | 2016-11-07 19:49:47 +0000 | [diff] [blame] | 2302 | } perf; |
| 2303 | |
Oscar Mateo | a83014d | 2014-07-24 17:04:21 +0100 | [diff] [blame] | 2304 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
| 2305 | struct { |
Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 2306 | void (*resume)(struct drm_i915_private *); |
Tvrtko Ursulin | 117897f | 2016-03-16 11:00:40 +0000 | [diff] [blame] | 2307 | void (*cleanup_engine)(struct intel_engine_cs *engine); |
Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2308 | |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2309 | struct list_head timelines; |
| 2310 | struct i915_gem_timeline global_timeline; |
Chris Wilson | 28176ef | 2016-10-28 13:58:56 +0100 | [diff] [blame] | 2311 | u32 active_requests; |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 2312 | |
Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2313 | /** |
| 2314 | * Is the GPU currently considered idle, or busy executing |
| 2315 | * userspace requests? Whilst idle, we allow runtime power |
| 2316 | * management to power down the hardware and display clocks. |
| 2317 | * In order to reduce the effect on performance, there |
| 2318 | * is a slight delay before we do so. |
| 2319 | */ |
Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2320 | bool awake; |
| 2321 | |
| 2322 | /** |
Chris Wilson | 6f56103 | 2018-01-24 11:36:07 +0000 | [diff] [blame] | 2323 | * The number of times we have woken up. |
| 2324 | */ |
| 2325 | unsigned int epoch; |
| 2326 | #define I915_EPOCH_INVALID 0 |
| 2327 | |
| 2328 | /** |
Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 2329 | * We leave the user IRQ off as much as possible, |
| 2330 | * but this means that requests will finish and never |
| 2331 | * be retired once the system goes idle. Set a timer to |
| 2332 | * fire periodically while the ring is running. When it |
| 2333 | * fires, go retire requests. |
| 2334 | */ |
| 2335 | struct delayed_work retire_work; |
| 2336 | |
| 2337 | /** |
| 2338 | * When we detect an idle GPU, we want to turn on |
| 2339 | * powersaving features. So once we see that there |
| 2340 | * are no more requests outstanding and no more |
| 2341 | * arrive within a small period of time, we fire |
| 2342 | * off the idle_work. |
| 2343 | */ |
| 2344 | struct delayed_work idle_work; |
Chris Wilson | de867c2 | 2016-10-25 13:16:02 +0100 | [diff] [blame] | 2345 | |
| 2346 | ktime_t last_init_time; |
Oscar Mateo | a83014d | 2014-07-24 17:04:21 +0100 | [diff] [blame] | 2347 | } gt; |
| 2348 | |
Ville Syrjälä | 3be60de | 2015-09-08 18:05:45 +0300 | [diff] [blame] | 2349 | /* perform PHY state sanity checks? */ |
| 2350 | bool chv_phy_assert[2]; |
| 2351 | |
Mahesh Kumar | a3a8986 | 2016-12-01 21:19:34 +0530 | [diff] [blame] | 2352 | bool ipc_enabled; |
| 2353 | |
Pandiyan, Dhinakaran | f931894 | 2016-09-21 13:02:48 -0700 | [diff] [blame] | 2354 | /* Used to save the pipe-to-encoder mapping for audio */ |
| 2355 | struct intel_encoder *av_enc_map[I915_MAX_PIPES]; |
Takashi Iwai | 0bdf5a0 | 2015-11-30 18:19:39 +0100 | [diff] [blame] | 2356 | |
Jerome Anand | eef5732 | 2017-01-25 04:27:49 +0530 | [diff] [blame] | 2357 | /* necessary resource sharing with HDMI LPE audio driver. */ |
| 2358 | struct { |
| 2359 | struct platform_device *platdev; |
| 2360 | int irq; |
| 2361 | } lpe_audio; |
| 2362 | |
Tvrtko Ursulin | b46a33e | 2017-11-21 18:18:45 +0000 | [diff] [blame] | 2363 | struct i915_pmu pmu; |
| 2364 | |
Daniel Vetter | bdf1e7e | 2014-05-21 17:37:52 +0200 | [diff] [blame] | 2365 | /* |
| 2366 | * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch |
| 2367 | * will be rejected. Instead look for a better place. |
| 2368 | */ |
Jani Nikula | 77fec55 | 2014-03-31 14:27:22 +0300 | [diff] [blame] | 2369 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2370 | |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 2371 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
| 2372 | { |
Chris Wilson | 091387c | 2016-06-24 14:00:21 +0100 | [diff] [blame] | 2373 | return container_of(dev, struct drm_i915_private, drm); |
Chris Wilson | 2c1792a | 2013-08-01 18:39:55 +0100 | [diff] [blame] | 2374 | } |
| 2375 | |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2376 | static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) |
Imre Deak | 888d0d4 | 2015-01-08 17:54:13 +0200 | [diff] [blame] | 2377 | { |
David Weinehall | c49d13e | 2016-08-22 13:32:42 +0300 | [diff] [blame] | 2378 | return to_i915(dev_get_drvdata(kdev)); |
Imre Deak | 888d0d4 | 2015-01-08 17:54:13 +0200 | [diff] [blame] | 2379 | } |
| 2380 | |
Alex Dai | 33a732f | 2015-08-12 15:43:36 +0100 | [diff] [blame] | 2381 | static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) |
| 2382 | { |
| 2383 | return container_of(guc, struct drm_i915_private, guc); |
| 2384 | } |
| 2385 | |
Arkadiusz Hiler | 50beba5 | 2017-03-14 15:28:06 +0100 | [diff] [blame] | 2386 | static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) |
| 2387 | { |
| 2388 | return container_of(huc, struct drm_i915_private, huc); |
| 2389 | } |
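
/*
 * Usage sketch (illustrative only, not part of the driver): the
 * container_of() helpers above let code that is handed an embedded
 * member recover the owning drm_i915_private. For example, a
 * hypothetical GuC event handler:
 *
 *	static void example_handle_guc_event(struct intel_guc *guc)
 *	{
 *		struct drm_i915_private *i915 = guc_to_i915(guc);
 *
 *		dev_info(i915->drm.dev, "GuC event handled\n");
 *	}
 */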
| 2390 | |
Dave Gordon | b4ac5af | 2016-03-24 11:20:38 +0000 | [diff] [blame] | 2391 | /* Simple iterator over all initialised engines */ |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2392 | #define for_each_engine(engine__, dev_priv__, id__) \ |
| 2393 | for ((id__) = 0; \ |
| 2394 | (id__) < I915_NUM_ENGINES; \ |
| 2395 | (id__)++) \ |
| 2396 | for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) |
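
/*
 * Usage sketch (illustrative): visit every initialised engine. The
 * caller provides the "engine" and "id" locals consumed by the macro.
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		DRM_DEBUG("found engine %s\n", engine->name);
 */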
Dave Gordon | c3232b1 | 2016-03-23 18:19:53 +0000 | [diff] [blame] | 2397 | |
| 2398 | /* Iterator over subset of engines selected by mask */ |
Chris Wilson | bafb0fc | 2016-08-27 08:54:01 +0100 | [diff] [blame] | 2399 | #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ |
| 2400 | for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ |
Akash Goel | 3b3f165 | 2016-10-13 22:44:48 +0530 | [diff] [blame] | 2401 | tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) |
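
/*
 * Usage sketch (illustrative): visit only the engines selected by a
 * bitmask, e.g. the set targeted by a reset request; "tmp" is scratch
 * storage consumed by the iterator.
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int tmp;
 *
 *	for_each_engine_masked(engine, dev_priv, ALL_ENGINES, tmp)
 *		DRM_DEBUG("selected engine %s\n", engine->name);
 */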
Mika Kuoppala | ee4b6fa | 2016-03-16 17:54:00 +0200 | [diff] [blame] | 2402 | |
Wu Fengguang | b1d7e4b | 2012-02-14 11:45:36 +0800 | [diff] [blame] | 2403 | enum hdmi_force_audio { |
| 2404 | HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ |
| 2405 | HDMI_AUDIO_OFF, /* force turn off HDMI audio */ |
| 2406 | HDMI_AUDIO_AUTO, /* trust EDID */ |
| 2407 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
| 2408 | }; |
| 2409 | |
Daniel Vetter | 190d6cd | 2013-07-04 13:06:28 +0200 | [diff] [blame] | 2410 | #define I915_GTT_OFFSET_NONE ((u32)-1) |
Chris Wilson | ed2f345 | 2012-11-15 11:32:19 +0000 | [diff] [blame] | 2411 | |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2412 | /* |
| 2413 | * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is |
Sagar Arun Kamble | d1b9d03 | 2015-09-14 21:35:42 +0530 | [diff] [blame] | 2414 | * considered to be the frontbuffer for the given plane interface-wise. This |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2415 | * doesn't mean that the hw necessarily already scans it out, but that any |
| 2416 | * rendering (by the cpu or gpu) will land in the frontbuffer eventually. |
| 2417 | * |
| 2418 | * We have one bit per pipe and per scanout plane type. |
| 2419 | */ |
Sagar Arun Kamble | d1b9d03 | 2015-09-14 21:35:42 +0530 | [diff] [blame] | 2420 | #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 |
Ville Syrjälä | c19e112 | 2018-01-23 20:33:43 +0200 | [diff] [blame] | 2421 | #define INTEL_FRONTBUFFER(pipe, plane_id) \ |
| 2422 | (1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2423 | #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ |
Ville Syrjälä | c19e112 | 2018-01-23 20:33:43 +0200 | [diff] [blame] | 2424 | (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
Daniel Vetter | cc36513 | 2014-06-18 13:59:13 +0200 | [diff] [blame] | 2425 | #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ |
Sagar Arun Kamble | d1b9d03 | 2015-09-14 21:35:42 +0530 | [diff] [blame] | 2426 | (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
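
/*
 * Worked example, following the definitions above with
 * INTEL_FRONTBUFFER_BITS_PER_PIPE == 8: plane 0 of pipe A maps to
 * bit 0, the overlay slot of pipe A to bit 7, and pipe B owns bits
 * 8-15:
 *
 *	INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY) == 0x0001
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_A)        == 0x0080
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B)       == 0xff00
 */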
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2427 | |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2428 | /* |
| 2429 | * Optimised SGL iterator for GEM objects |
| 2430 | */ |
| 2431 | static __always_inline struct sgt_iter { |
| 2432 | struct scatterlist *sgp; |
| 2433 | union { |
| 2434 | unsigned long pfn; |
| 2435 | dma_addr_t dma; |
| 2436 | }; |
| 2437 | unsigned int curr; |
| 2438 | unsigned int max; |
| 2439 | } __sgt_iter(struct scatterlist *sgl, bool dma) { |
| 2440 | struct sgt_iter s = { .sgp = sgl }; |
| 2441 | |
| 2442 | if (s.sgp) { |
| 2443 | s.max = s.curr = s.sgp->offset; |
| 2444 | s.max += s.sgp->length; |
| 2445 | if (dma) |
| 2446 | s.dma = sg_dma_address(s.sgp); |
| 2447 | else |
| 2448 | s.pfn = page_to_pfn(sg_page(s.sgp)); |
| 2449 | } |
| 2450 | |
| 2451 | return s; |
| 2452 | } |
| 2453 | |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 2454 | static inline struct scatterlist *____sg_next(struct scatterlist *sg) |
| 2455 | { |
| 2456 | ++sg; |
| 2457 | if (unlikely(sg_is_chain(sg))) |
| 2458 | sg = sg_chain_ptr(sg); |
| 2459 | return sg; |
| 2460 | } |
| 2461 | |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2462 | /** |
Dave Gordon | 63d1532 | 2016-05-20 11:54:07 +0100 | [diff] [blame] | 2463 | * __sg_next - return the next scatterlist entry in a list |
| 2464 | * @sg: The current sg entry |
| 2465 | * |
| 2466 | * Description: |
| 2467 | * If the entry is the last, return NULL; otherwise, step to the next |
| 2468 | * element in the array (@sg@+1). If that's a chain pointer, follow it; |
| 2469 | * otherwise just return the pointer to the current element. |
| 2470 | **/ |
| 2471 | static inline struct scatterlist *__sg_next(struct scatterlist *sg) |
| 2472 | { |
| 2473 | #ifdef CONFIG_DEBUG_SG |
| 2474 | BUG_ON(sg->sg_magic != SG_MAGIC); |
| 2475 | #endif |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 2476 | return sg_is_last(sg) ? NULL : ____sg_next(sg); |
Dave Gordon | 63d1532 | 2016-05-20 11:54:07 +0100 | [diff] [blame] | 2477 | } |
| 2478 | |
| 2479 | /** |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2480 | * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table |
| 2481 | * @__dmap: DMA address (output) |
| 2482 | * @__iter: 'struct sgt_iter' (iterator state, internal) |
| 2483 | * @__sgt: sg_table to iterate over (input) |
| 2484 | */ |
| 2485 | #define for_each_sgt_dma(__dmap, __iter, __sgt) \ |
| 2486 | for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ |
| 2487 | ((__dmap) = (__iter).dma + (__iter).curr); \ |
Chris Wilson | e60b36f7 | 2017-09-13 11:57:54 +0100 | [diff] [blame] | 2488 | (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ |
| 2489 | (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) |
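
/*
 * Usage sketch (illustrative): walk every DMA address of an object's
 * backing storage, where "pages" is the object's sg_table and
 * example_insert_pte() is a made-up stand-in for a page-table setter.
 *
 *	struct sgt_iter sgt_iter;
 *	dma_addr_t addr;
 *
 *	for_each_sgt_dma(addr, sgt_iter, pages)
 *		example_insert_pte(ppgtt, addr);
 */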
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 2490 | |
| 2491 | /** |
| 2492 | * for_each_sgt_page - iterate over the pages of the given sg_table |
| 2493 | * @__pp: page pointer (output) |
| 2494 | * @__iter: 'struct sgt_iter' (iterator state, internal) |
| 2495 | * @__sgt: sg_table to iterate over (input) |
| 2496 | */ |
| 2497 | #define for_each_sgt_page(__pp, __iter, __sgt) \ |
| 2498 | for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ |
| 2499 | ((__pp) = (__iter).pfn == 0 ? NULL : \ |
| 2500 | pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ |
Chris Wilson | e60b36f7 | 2017-09-13 11:57:54 +0100 | [diff] [blame] | 2501 | (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ |
| 2502 | (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) |
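
/*
 * Usage sketch (illustrative): the page variant yields struct page
 * pointers instead of DMA addresses, e.g. to dirty each backing page:
 *
 *	struct sgt_iter sgt_iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, sgt_iter, pages)
 *		set_page_dirty(page);
 */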
Daniel Vetter | a071fa0 | 2014-06-18 23:28:09 +0200 | [diff] [blame] | 2503 | |
Matthew Auld | a5c08166 | 2017-10-06 23:18:18 +0100 | [diff] [blame] | 2504 | static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) |
| 2505 | { |
| 2506 | unsigned int page_sizes; |
| 2507 | |
| 2508 | page_sizes = 0; |
| 2509 | while (sg) { |
| 2510 | GEM_BUG_ON(sg->offset); |
| 2511 | GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); |
| 2512 | page_sizes |= sg->length; |
| 2513 | sg = __sg_next(sg); |
| 2514 | } |
| 2515 | |
| 2516 | return page_sizes; |
| 2517 | } |
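
/*
 * Worked example (from the loop above): an object backed by one 2M
 * segment and one 4K segment reports 0x200000 | 0x1000 == 0x201000,
 * i.e. the bitwise OR of all segment lengths.
 */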
| 2518 | |
Tvrtko Ursulin | 5602452 | 2017-08-03 10:14:17 +0100 | [diff] [blame] | 2519 | static inline unsigned int i915_sg_segment_size(void) |
| 2520 | { |
| 2521 | unsigned int size = swiotlb_max_segment(); |
| 2522 | |
| 2523 | if (size == 0) |
| 2524 | return SCATTERLIST_MAX_SEGMENT; |
| 2525 | |
| 2526 | size = rounddown(size, PAGE_SIZE); |
| 2527 | /* swiotlb_max_segment can return 1 byte when it means one page. */ |
| 2528 | if (size < PAGE_SIZE) |
| 2529 | size = PAGE_SIZE; |
| 2530 | |
| 2531 | return size; |
| 2532 | } |
| 2533 | |
Tvrtko Ursulin | 5ca43ef | 2016-11-16 08:55:45 +0000 | [diff] [blame] | 2534 | static inline const struct intel_device_info * |
| 2535 | intel_info(const struct drm_i915_private *dev_priv) |
| 2536 | { |
| 2537 | return &dev_priv->info; |
| 2538 | } |
| 2539 | |
| 2540 | #define INTEL_INFO(dev_priv) intel_info((dev_priv)) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2541 | |
Tvrtko Ursulin | 55b8f2a | 2016-10-14 09:17:22 +0100 | [diff] [blame] | 2542 | #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2543 | #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2544 | |
Jani Nikula | e87a005 | 2015-10-20 15:22:02 +0300 | [diff] [blame] | 2545 | #define REVID_FOREVER 0xff |
Tvrtko Ursulin | 4805fe8 | 2016-11-04 14:42:46 +0000 | [diff] [blame] | 2546 | #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) |
Tvrtko Ursulin | ac657f6 | 2016-05-10 10:57:08 +0100 | [diff] [blame] | 2547 | |
| 2548 | #define GEN_FOREVER (0) |
Joonas Lahtinen | fe52e59 | 2017-09-13 14:52:54 +0300 | [diff] [blame] | 2549 | |
| 2550 | #define INTEL_GEN_MASK(s, e) ( \ |
| 2551 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ |
| 2552 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ |
| 2553 | GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ |
| 2554 | (s) != GEN_FOREVER ? (s) - 1 : 0) \ |
| 2555 | ) |
| 2556 | |
Tvrtko Ursulin | ac657f6 | 2016-05-10 10:57:08 +0100 | [diff] [blame] | 2557 | /* |
| 2558 | * Returns true if Gen is in inclusive range [Start, End]. |
| 2559 | * |
| 2560 | * Use GEN_FOREVER for unbound start and/or end. |
| 2561 | */ |
Joonas Lahtinen | fe52e59 | 2017-09-13 14:52:54 +0300 | [diff] [blame] | 2562 | #define IS_GEN(dev_priv, s, e) \ |
| 2563 | (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) |
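
/*
 * For instance, following the definitions above: gen 9 corresponds to
 * bit 8 of gen_mask, and GEN_FOREVER leaves one end of the range open.
 *
 *	INTEL_GEN_MASK(9, 9)             == BIT(8)
 *	IS_GEN(dev_priv, 8, GEN_FOREVER)  - true on gen8 and newer
 *	IS_GEN(dev_priv, GEN_FOREVER, 5)  - true on gen5 and older
 */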
Tvrtko Ursulin | ac657f6 | 2016-05-10 10:57:08 +0100 | [diff] [blame] | 2564 | |
Jani Nikula | e87a005 | 2015-10-20 15:22:02 +0300 | [diff] [blame] | 2565 | /* |
| 2566 | * Return true if revision is in range [since,until] inclusive. |
| 2567 | * |
| 2568 | * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. |
| 2569 | */ |
| 2570 | #define IS_REVID(p, since, until) \ |
| 2571 | (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) |
| 2572 | |
Tvrtko Ursulin | ae7617f | 2017-09-27 17:41:38 +0100 | [diff] [blame] | 2573 | #define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p)) |
Tvrtko Ursulin | 5a127a8 | 2017-09-20 10:26:59 +0100 | [diff] [blame] | 2574 | |
| 2575 | #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) |
| 2576 | #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) |
| 2577 | #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) |
| 2578 | #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) |
| 2579 | #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) |
| 2580 | #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) |
| 2581 | #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) |
| 2582 | #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) |
| 2583 | #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) |
| 2584 | #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) |
| 2585 | #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) |
| 2586 | #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) |
Jani Nikula | f69c11a | 2016-11-30 17:43:05 +0200 | [diff] [blame] | 2587 | #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2588 | #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) |
| 2589 | #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) |
Tvrtko Ursulin | 5a127a8 | 2017-09-20 10:26:59 +0100 | [diff] [blame] | 2590 | #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) |
| 2591 | #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2592 | #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) |
Tvrtko Ursulin | 5a127a8 | 2017-09-20 10:26:59 +0100 | [diff] [blame] | 2593 | #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2594 | #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ |
| 2595 | (dev_priv)->info.gt == 1) |
Tvrtko Ursulin | 5a127a8 | 2017-09-20 10:26:59 +0100 | [diff] [blame] | 2596 | #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) |
| 2597 | #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) |
| 2598 | #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) |
| 2599 | #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) |
| 2600 | #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) |
| 2601 | #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) |
| 2602 | #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) |
| 2603 | #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) |
| 2604 | #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) |
| 2605 | #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) |
Rodrigo Vivi | 41231001 | 2018-01-11 16:00:04 -0200 | [diff] [blame] | 2606 | #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) |
Ville Syrjälä | 646d577 | 2016-10-31 22:37:14 +0200 | [diff] [blame] | 2607 | #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2608 | #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ |
| 2609 | (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) |
| 2610 | #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ |
| 2611 | ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ |
| 2612 | (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ |
| 2613 | (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) |
Ville Syrjälä | ebb72aa | 2015-06-03 15:45:12 +0300 | [diff] [blame] | 2614 | /* ULX machines are also considered ULT. */ |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2615 | #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ |
| 2616 | (INTEL_DEVID(dev_priv) & 0xf) == 0xe) |
| 2617 | #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2618 | (dev_priv)->info.gt == 3) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2619 | #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ |
| 2620 | (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) |
| 2621 | #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2622 | (dev_priv)->info.gt == 3) |
Paulo Zanoni | 9bbfd20 | 2014-04-29 11:00:22 -0300 | [diff] [blame] | 2623 | /* ULX machines are also considered ULT. */ |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2624 | #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ |
| 2625 | INTEL_DEVID(dev_priv) == 0x0A1E) |
| 2626 | #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ |
| 2627 | INTEL_DEVID(dev_priv) == 0x1913 || \ |
| 2628 | INTEL_DEVID(dev_priv) == 0x1916 || \ |
| 2629 | INTEL_DEVID(dev_priv) == 0x1921 || \ |
| 2630 | INTEL_DEVID(dev_priv) == 0x1926) |
| 2631 | #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ |
| 2632 | INTEL_DEVID(dev_priv) == 0x1915 || \ |
| 2633 | INTEL_DEVID(dev_priv) == 0x191E) |
| 2634 | #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ |
| 2635 | INTEL_DEVID(dev_priv) == 0x5913 || \ |
| 2636 | INTEL_DEVID(dev_priv) == 0x5916 || \ |
| 2637 | INTEL_DEVID(dev_priv) == 0x5921 || \ |
| 2638 | INTEL_DEVID(dev_priv) == 0x5926) |
| 2639 | #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ |
| 2640 | INTEL_DEVID(dev_priv) == 0x5915 || \ |
| 2641 | INTEL_DEVID(dev_priv) == 0x591E) |
Robert Bragg | 19f81df | 2017-06-13 12:23:03 +0100 | [diff] [blame] | 2642 | #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2643 | (dev_priv)->info.gt == 2) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2644 | #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2645 | (dev_priv)->info.gt == 3) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2646 | #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2647 | (dev_priv)->info.gt == 4) |
Lionel Landwerlin | 3891589 | 2017-06-13 12:23:07 +0100 | [diff] [blame] | 2648 | #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2649 | (dev_priv)->info.gt == 2) |
Lionel Landwerlin | 3891589 | 2017-06-13 12:23:07 +0100 | [diff] [blame] | 2650 | #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ |
Lionel Landwerlin | 18b5381 | 2017-08-30 17:12:07 +0100 | [diff] [blame] | 2651 | (dev_priv)->info.gt == 3) |
Rodrigo Vivi | da411a4 | 2017-06-09 15:02:50 -0700 | [diff] [blame] | 2652 | #define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ |
| 2653 | (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) |
Lionel Landwerlin | 22ea4f3 | 2017-09-18 12:21:24 +0100 | [diff] [blame] | 2654 | #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ |
| 2655 | (dev_priv)->info.gt == 2) |
Lionel Landwerlin | 4407eaa | 2017-11-10 19:08:40 +0000 | [diff] [blame] | 2656 | #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ |
| 2657 | (dev_priv)->info.gt == 3) |
Rodrigo Vivi | 3f43031 | 2018-01-29 15:22:14 -0800 | [diff] [blame] | 2658 | #define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \ |
| 2659 | (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004) |
Sagar Arun Kamble | 7a58bad | 2015-09-12 10:17:50 +0530 | [diff] [blame] | 2660 | |
Jani Nikula | c007fb4 | 2016-10-31 12:18:28 +0200 | [diff] [blame] | 2661 | #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2662 | |
Jani Nikula | ef712bb | 2015-10-20 15:22:00 +0300 | [diff] [blame] | 2663 | #define SKL_REVID_A0 0x0 |
| 2664 | #define SKL_REVID_B0 0x1 |
| 2665 | #define SKL_REVID_C0 0x2 |
| 2666 | #define SKL_REVID_D0 0x3 |
| 2667 | #define SKL_REVID_E0 0x4 |
| 2668 | #define SKL_REVID_F0 0x5 |
Mika Kuoppala | 4ba9c1f | 2016-07-20 14:26:12 +0300 | [diff] [blame] | 2669 | #define SKL_REVID_G0 0x6 |
| 2670 | #define SKL_REVID_H0 0x7 |
Hoath, Nicholas | e90a21d | 2015-02-05 10:47:17 +0000 | [diff] [blame] | 2671 | |
Jani Nikula | e87a005 | 2015-10-20 15:22:02 +0300 | [diff] [blame] | 2672 | #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) |
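
/*
 * Typical use (illustrative): bound a workaround to the steppings that
 * need it; example_apply_wa() is a made-up stand-in.
 *
 *	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
 *		example_apply_wa(dev_priv);
 */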
| 2673 | |
Jani Nikula | ef712bb | 2015-10-20 15:22:00 +0300 | [diff] [blame] | 2674 | #define BXT_REVID_A0 0x0 |
Jani Nikula | fffda3f | 2015-10-20 15:22:01 +0300 | [diff] [blame] | 2675 | #define BXT_REVID_A1 0x1 |
Jani Nikula | ef712bb | 2015-10-20 15:22:00 +0300 | [diff] [blame] | 2676 | #define BXT_REVID_B0 0x3 |
Ander Conselvan de Oliveira | a3f79ca | 2016-11-24 15:23:27 +0200 | [diff] [blame] | 2677 | #define BXT_REVID_B_LAST 0x8 |
Jani Nikula | ef712bb | 2015-10-20 15:22:00 +0300 | [diff] [blame] | 2678 | #define BXT_REVID_C0 0x9 |
Nick Hoath | 6c74c87 | 2015-03-20 09:03:52 +0000 | [diff] [blame] | 2679 | |
Tvrtko Ursulin | e2d214a | 2016-10-13 11:03:04 +0100 | [diff] [blame] | 2680 | #define IS_BXT_REVID(dev_priv, since, until) \ |
| 2681 | (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) |
Jani Nikula | e87a005 | 2015-10-20 15:22:02 +0300 | [diff] [blame] | 2682 | |
Mika Kuoppala | c033a37 | 2016-06-07 17:18:55 +0300 | [diff] [blame] | 2683 | #define KBL_REVID_A0 0x0 |
| 2684 | #define KBL_REVID_B0 0x1 |
Mika Kuoppala | fe90581 | 2016-06-07 17:19:03 +0300 | [diff] [blame] | 2685 | #define KBL_REVID_C0 0x2 |
| 2686 | #define KBL_REVID_D0 0x3 |
| 2687 | #define KBL_REVID_E0 0x4 |
Mika Kuoppala | c033a37 | 2016-06-07 17:18:55 +0300 | [diff] [blame] | 2688 | |
Tvrtko Ursulin | 0853723 | 2016-10-13 11:03:02 +0100 | [diff] [blame] | 2689 | #define IS_KBL_REVID(dev_priv, since, until) \ |
| 2690 | (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) |
Mika Kuoppala | c033a37 | 2016-06-07 17:18:55 +0300 | [diff] [blame] | 2691 | |
Ander Conselvan de Oliveira | f4f4b59 | 2017-02-22 08:34:29 +0200 | [diff] [blame] | 2692 | #define GLK_REVID_A0 0x0 |
| 2693 | #define GLK_REVID_A1 0x1 |
| 2694 | |
| 2695 | #define IS_GLK_REVID(dev_priv, since, until) \ |
| 2696 | (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) |
| 2697 | |
Paulo Zanoni | 3c2e0fd | 2017-06-06 13:30:34 -0700 | [diff] [blame] | 2698 | #define CNL_REVID_A0 0x0 |
| 2699 | #define CNL_REVID_B0 0x1 |
Rodrigo Vivi | e4ffc83 | 2017-08-22 16:58:28 -0700 | [diff] [blame] | 2700 | #define CNL_REVID_C0 0x2 |
Paulo Zanoni | 3c2e0fd | 2017-06-06 13:30:34 -0700 | [diff] [blame] | 2701 | |
| 2702 | #define IS_CNL_REVID(p, since, until) \ |
| 2703 | (IS_CANNONLAKE(p) && IS_REVID(p, since, until)) |
| 2704 | |
Jesse Barnes | 8543669 | 2011-04-06 12:11:14 -0700 | [diff] [blame] | 2705 | /* |
| 2706 | * The genX designation typically refers to the render engine, so render |
| 2707 | * capability related checks should use IS_GEN, while display and other checks |
| 2708 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular |
| 2709 | * chips, etc.). |
| 2710 | */ |
Tvrtko Ursulin | 5db9401 | 2016-10-13 11:03:10 +0100 | [diff] [blame] | 2711 | #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) |
| 2712 | #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) |
| 2713 | #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) |
| 2714 | #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) |
| 2715 | #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) |
| 2716 | #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) |
| 2717 | #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) |
| 2718 | #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) |
Rodrigo Vivi | 413f3c1 | 2017-06-06 13:30:30 -0700 | [diff] [blame] | 2719 | #define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9))) |
Rodrigo Vivi | 41231001 | 2018-01-11 16:00:04 -0200 | [diff] [blame] | 2720 | #define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10))) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2721 | |
Rodrigo Vivi | 8727dc0 | 2016-12-18 13:36:26 -0800 | [diff] [blame] | 2722 | #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) |
Rodrigo Vivi | b976dc5 | 2017-01-23 10:32:37 -0800 | [diff] [blame] | 2723 | #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) |
| 2724 | #define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) |
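Per the comment above, a gen check gates render-side behaviour while display paths use their own predicates. A sketch of that split, in which both setup helpers are hypothetical but the two predicates come straight from this header:

    static void init_device_features(struct drm_i915_private *dev_priv)
    {
            if (IS_GEN9(dev_priv))
                    init_gen9_render(dev_priv);     /* render: IS_GEN check */

            if (HAS_PCH_SPLIT(dev_priv))
                    init_pch_display(dev_priv);     /* display: PCH-based check */
    }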
Ander Conselvan de Oliveira | 3e4274f | 2016-11-10 17:23:09 +0200 | [diff] [blame] | 2725 | |
Tvrtko Ursulin | a19d6ff | 2016-06-23 14:52:41 +0100 | [diff] [blame] | 2726 | #define ENGINE_MASK(id) BIT(id) |
| 2727 | #define RENDER_RING ENGINE_MASK(RCS) |
| 2728 | #define BSD_RING ENGINE_MASK(VCS) |
| 2729 | #define BLT_RING ENGINE_MASK(BCS) |
| 2730 | #define VEBOX_RING ENGINE_MASK(VECS) |
| 2731 | #define BSD2_RING ENGINE_MASK(VCS2) |
| 2732 | #define ALL_ENGINES (~0) |
Mika Kuoppala | ee4b6fa | 2016-03-16 17:54:00 +0200 | [diff] [blame] | 2733 | |
Tvrtko Ursulin | a19d6ff | 2016-06-23 14:52:41 +0100 | [diff] [blame] | 2734 | #define HAS_ENGINE(dev_priv, id) \ |
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 2735 | (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) |
Tvrtko Ursulin | a19d6ff | 2016-06-23 14:52:41 +0100 | [diff] [blame] | 2736 | |
| 2737 | #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) |
| 2738 | #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) |
| 2739 | #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) |
| 2740 | #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) |
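HAS_ENGINE() is just a mask test against info.ring_mask, so derived properties such as the total engine count fall out of a population count over the same mask; a sketch (hweight32() comes from linux/bitops.h):

    static unsigned int num_engines(struct drm_i915_private *dev_priv)
    {
            /* Each set bit in the ring mask is one available engine. */
            return hweight32(dev_priv->info.ring_mask);
    }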
| 2741 | |
Chris Wilson | 93c6e96 | 2017-11-20 20:55:04 +0000 | [diff] [blame] | 2742 | #define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv) |
| 2743 | |
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 2744 | #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) |
| 2745 | #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) |
| 2746 | #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) |
Tvrtko Ursulin | 8652744 | 2016-10-13 11:03:00 +0100 | [diff] [blame] | 2747 | #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ |
| 2748 | IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2749 | |
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 2750 | #define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) |
Daniel Vetter | 1d2a314 | 2012-02-09 17:15:46 +0100 | [diff] [blame] | 2751 | |
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 2752 | #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ |
| 2753 | ((dev_priv)->info.has_logical_ring_contexts) |
Michał Winiarski | a4598d1 | 2017-10-25 22:00:18 +0200 | [diff] [blame] | 2754 | #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ |
| 2755 | ((dev_priv)->info.has_logical_ring_preemption) |
Chris Wilson | fb5c551 | 2017-11-20 20:55:00 +0000 | [diff] [blame] | 2756 | |
| 2757 | #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) |
| 2758 | |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame] | 2759 | #define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) |
| 2760 | #define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) |
| 2761 | #define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) |
Matthew Auld | a5c08166 | 2017-10-06 23:18:18 +0100 | [diff] [blame] | 2762 | #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ |
| 2763 | GEM_BUG_ON((sizes) == 0); \ |
| 2764 | ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ |
| 2765 | }) |
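HAS_PAGE_SIZES() verifies that every bit in the supplied mask is a page size the device supports. For example, gating a 64K-page binding path might look like the sketch below, assuming I915_GTT_PAGE_SIZE_64K from i915_gem_gtt.h:

    static bool can_use_64k_pages(struct drm_i915_private *dev_priv)
    {
            /* True only if 64K GTT pages are among the supported sizes. */
            return HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K);
    }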
Tvrtko Ursulin | 0031fb9 | 2016-11-04 14:42:44 +0000 | [diff] [blame] | 2766 | |
| 2767 | #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) |
| 2768 | #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ |
| 2769 | ((dev_priv)->info.overlay_needs_physical) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2770 | |
Daniel Vetter | b45305f | 2012-12-17 16:21:27 +0100 | [diff] [blame] | 2771 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
Jani Nikula | 2a307c2 | 2016-11-30 17:43:04 +0200 | [diff] [blame] | 2772 | #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) |
Mika Kuoppala | 06e668a | 2015-12-16 19:18:37 +0200 | [diff] [blame] | 2773 | |
| 2774 | /* WaRsDisableCoarsePowerGating:skl,bxt */ |
Tvrtko Ursulin | 6125151 | 2016-06-21 15:07:14 +0100 | [diff] [blame] | 2775 | #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ |
Jani Nikula | f2254d2 | 2017-02-15 17:21:39 +0200 | [diff] [blame] | 2776 | (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) |
Mika Kuoppala | 185c66e | 2016-04-05 15:56:16 +0300 | [diff] [blame] | 2777 | |
Daniel Vetter | 4e6b788 | 2014-02-07 16:33:20 +0100 | [diff] [blame] | 2778 | /* |
| 2779 | * dp aux and gmbus irqs on gen4 seem to be able to generate legacy interrupts |
| 2780 | * even when in MSI mode. This results in spurious interrupt warnings if the |
| 2781 | * legacy irq no. is shared with another device. The kernel then disables that |
| 2782 | * interrupt source and so prevents the other device from working properly. |
Ville Syrjälä | 309bd8e | 2017-08-18 21:37:05 +0300 | [diff] [blame] | 2783 | * |
| 2784 | * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX |
| 2785 | * interrupts. |
Daniel Vetter | 4e6b788 | 2014-02-07 16:33:20 +0100 | [diff] [blame] | 2786 | */ |
Ville Syrjälä | 309bd8e | 2017-08-18 21:37:05 +0300 | [diff] [blame] | 2787 | #define HAS_AUX_IRQ(dev_priv) true |
| 2788 | #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) |
Daniel Vetter | b45305f | 2012-12-17 16:21:27 +0100 | [diff] [blame] | 2789 | |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2790 | /* With the 945 and later, Y tiling got adjusted so that it was 32 rows of |
| 2791 | * 128 bytes each, which changed the alignment requirements and fence programming. |
| 2792 | */ |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2793 | #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ |
| 2794 | !(IS_I915G(dev_priv) || \ |
| 2795 | IS_I915GM(dev_priv))) |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2796 | #define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) |
| 2797 | #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2798 | |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2799 | #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2800 | #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) |
Ville Syrjälä | 024faac | 2017-03-27 21:55:42 +0300 | [diff] [blame] | 2801 | #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2802 | |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2803 | #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) |
Damien Lespiau | f5adf94 | 2013-06-24 18:29:34 +0100 | [diff] [blame] | 2804 | |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2805 | #define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) |
Jani Nikula | 0c9b371 | 2015-05-18 17:10:01 +0300 | [diff] [blame] | 2806 | |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2807 | #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) |
| 2808 | #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) |
| 2809 | #define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) |
Chris Wilson | fb6db0f | 2017-12-01 11:30:30 +0000 | [diff] [blame] | 2810 | |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2811 | #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) |
| 2812 | #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) |
Chris Wilson | fb6db0f | 2017-12-01 11:30:30 +0000 | [diff] [blame] | 2813 | #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ |
Paulo Zanoni | affa935 | 2012-11-23 15:30:39 -0200 | [diff] [blame] | 2814 | |
Tvrtko Ursulin | 56b857a | 2016-11-07 09:29:20 +0000 | [diff] [blame] | 2815 | #define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) |
Daniel Vetter | eb80562 | 2015-05-04 14:58:44 +0200 | [diff] [blame] | 2816 | |
Tvrtko Ursulin | 6772ffe | 2016-10-13 11:02:55 +0100 | [diff] [blame] | 2817 | #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) |
Joonas Lahtinen | dfc5148 | 2016-11-03 10:39:46 +0200 | [diff] [blame] | 2818 | #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) |
| 2819 | |
Mahesh Kumar | e57f1c02 | 2017-08-17 19:15:27 +0530 | [diff] [blame] | 2820 | #define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) |
| 2821 | |
Dave Gordon | 1a3d189 | 2016-05-13 15:36:30 +0100 | [diff] [blame] | 2822 | /* |
| 2823 | * For now, anything with a GuC requires uCode loading, and then supports |
| 2824 | * command submission once loaded. But these are logically independent |
| 2825 | * properties, so we have separate macros to test them. |
| 2826 | */ |
Tvrtko Ursulin | 4805fe8 | 2016-11-04 14:42:46 +0000 | [diff] [blame] | 2827 | #define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) |
Michal Wajdeczko | f8a58d6 | 2017-05-26 11:13:25 +0000 | [diff] [blame] | 2828 | #define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct) |
Tvrtko Ursulin | 4805fe8 | 2016-11-04 14:42:46 +0000 | [diff] [blame] | 2829 | #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) |
| 2830 | #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) |
Michal Wajdeczko | 2fe2d4e | 2017-12-06 13:53:10 +0000 | [diff] [blame] | 2831 | |
| 2832 | /* For now, anything with a GuC also has a HuC */
| 2833 | #define HAS_HUC(dev_priv) (HAS_GUC(dev_priv)) |
Anusha Srivatsa | bd13285 | 2017-01-18 08:05:53 -0800 | [diff] [blame] | 2834 | #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) |
Alex Dai | 33a732f | 2015-08-12 15:43:36 +0100 | [diff] [blame] | 2835 | |
Michal Wajdeczko | 93ffbe8 | 2017-12-06 13:53:12 +0000 | [diff] [blame] | 2836 | /* Having a GuC is not the same as using a GuC */ |
Michal Wajdeczko | 121981f | 2017-12-06 13:53:15 +0000 | [diff] [blame] | 2837 | #define USES_GUC(dev_priv) intel_uc_is_using_guc() |
| 2838 | #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission() |
| 2839 | #define USES_HUC(dev_priv) intel_uc_is_using_huc() |
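The HAS_* macros describe what the hardware/firmware can do, while the USES_* macros reflect how the driver was configured (via the enable_guc module parameter); runtime paths should normally test the latter. A sketch, with both submission helpers hypothetical:

    static void select_submission_mode(struct drm_i915_private *dev_priv)
    {
            if (USES_GUC_SUBMISSION(dev_priv))
                    enable_guc_submission(dev_priv);        /* hypothetical */
            else
                    enable_execlists_submission(dev_priv);  /* hypothetical */
    }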
Michal Wajdeczko | 93ffbe8 | 2017-12-06 13:53:12 +0000 | [diff] [blame] | 2840 | |
Tvrtko Ursulin | 4805fe8 | 2016-11-04 14:42:46 +0000 | [diff] [blame] | 2841 | #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) |
Abdiel Janulgue | a9ed33c | 2015-07-01 10:12:23 +0300 | [diff] [blame] | 2842 | |
Tvrtko Ursulin | 4805fe8 | 2016-11-04 14:42:46 +0000 | [diff] [blame] | 2843 | #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) |
arun.siluvery@linux.intel.com | 33e141e | 2016-06-03 06:34:33 +0100 | [diff] [blame] | 2844 | |
Ville Syrjälä | c5e855d | 2017-06-21 20:49:44 +0300 | [diff] [blame] | 2845 | #define INTEL_PCH_DEVICE_ID_MASK 0xff80 |
Paulo Zanoni | 17a303e | 2012-11-20 15:12:07 -0200 | [diff] [blame] | 2846 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
| 2847 | #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
| 2848 | #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
| 2849 | #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 |
| 2850 | #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
Ville Syrjälä | c5e855d | 2017-06-21 20:49:44 +0300 | [diff] [blame] | 2851 | #define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80 |
| 2852 | #define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80 |
Satheeshakrishna M | e7e7ea2 | 2014-04-09 11:08:57 +0530 | [diff] [blame] | 2853 | #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 |
| 2854 | #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 |
Ville Syrjälä | c5e855d | 2017-06-21 20:49:44 +0300 | [diff] [blame] | 2855 | #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 |
Rodrigo Vivi | 7b22b8c | 2017-06-02 13:06:39 -0700 | [diff] [blame] | 2856 | #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 |
Dhinakaran Pandiyan | ec7e0bb | 2017-06-02 13:06:40 -0700 | [diff] [blame] | 2857 | #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 |
Anusha Srivatsa | 5c8ea01 | 2018-01-11 16:00:10 -0200 | [diff] [blame] | 2858 | #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 |
Robert Beckett | 30c964a | 2015-08-28 13:10:22 +0100 | [diff] [blame] | 2859 | #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 |
Jesse Barnes | 1844a66 | 2016-03-16 13:31:30 -0700 | [diff] [blame] | 2860 | #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 |
Gerd Hoffmann | 39bfcd52 | 2015-11-26 12:03:51 +0100 | [diff] [blame] | 2861 | #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ |
Paulo Zanoni | 17a303e | 2012-11-20 15:12:07 -0200 | [diff] [blame] | 2862 | |
Tvrtko Ursulin | 6e26695 | 2016-10-13 11:02:53 +0100 | [diff] [blame] | 2863 | #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) |
Anusha Srivatsa | 0b58436 | 2018-01-11 16:00:05 -0200 | [diff] [blame] | 2864 | #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) |
Rodrigo Vivi | 7b22b8c | 2017-06-02 13:06:39 -0700 | [diff] [blame] | 2865 | #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) |
Dhinakaran Pandiyan | ec7e0bb | 2017-06-02 13:06:40 -0700 | [diff] [blame] | 2866 | #define HAS_PCH_CNP_LP(dev_priv) \ |
| 2867 | ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) |
Tvrtko Ursulin | 6e26695 | 2016-10-13 11:02:53 +0100 | [diff] [blame] | 2868 | #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) |
| 2869 | #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) |
| 2870 | #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) |
Tvrtko Ursulin | 4f8036a | 2016-10-13 11:02:52 +0100 | [diff] [blame] | 2871 | #define HAS_PCH_LPT_LP(dev_priv) \ |
Ville Syrjälä | c5e855d | 2017-06-21 20:49:44 +0300 | [diff] [blame] | 2872 | ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ |
| 2873 | (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) |
Tvrtko Ursulin | 4f8036a | 2016-10-13 11:02:52 +0100 | [diff] [blame] | 2874 | #define HAS_PCH_LPT_H(dev_priv) \ |
Ville Syrjälä | c5e855d | 2017-06-21 20:49:44 +0300 | [diff] [blame] | 2875 | ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ |
| 2876 | (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE) |
Tvrtko Ursulin | 6e26695 | 2016-10-13 11:02:53 +0100 | [diff] [blame] | 2877 | #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) |
| 2878 | #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) |
| 2879 | #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) |
| 2880 | #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) |
Zou Nan hai | cae5852 | 2010-11-09 17:17:32 +0800 | [diff] [blame] | 2881 | |
Tvrtko Ursulin | 49cff96 | 2016-10-13 11:02:54 +0100 | [diff] [blame] | 2882 | #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) |
Sonika Jindal | 5fafe29 | 2014-07-21 15:23:38 +0530 | [diff] [blame] | 2883 | |
Rodrigo Vivi | ff15947 | 2017-06-09 15:26:14 -0700 | [diff] [blame] | 2884 | #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) |
Shashank Sharma | 6389dd8 | 2016-10-14 19:56:50 +0530 | [diff] [blame] | 2885 | |
Ben Widawsky | 040d2ba | 2013-09-19 11:01:40 -0700 | [diff] [blame] | 2886 | /* DPF == dynamic parity feature */ |
Tvrtko Ursulin | 3c9192b | 2016-10-13 11:03:05 +0100 | [diff] [blame] | 2887 | #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) |
Tvrtko Ursulin | 50a0bc9 | 2016-10-13 11:02:58 +0100 | [diff] [blame] | 2888 | #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \ |
| 2889 | 2 : HAS_L3_DPF(dev_priv)) |
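NUM_L3_SLICES() typically bounds loops over per-slice L3 remapping state; a sketch, with remap_l3_slice() standing in as a hypothetical helper:

    static void remap_l3(struct drm_i915_private *dev_priv)
    {
            int slice;

            for (slice = 0; slice < NUM_L3_SLICES(dev_priv); slice++)
                    remap_l3_slice(dev_priv, slice); /* hypothetical helper */
    }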
Ben Widawsky | e1ef7cc | 2012-07-24 20:47:31 -0700 | [diff] [blame] | 2890 | |
Ben Widawsky | c8735b0 | 2012-09-07 19:43:39 -0700 | [diff] [blame] | 2891 | #define GT_FREQUENCY_MULTIPLIER 50 |
Akash Goel | de43ae9 | 2015-03-06 11:07:14 +0530 | [diff] [blame] | 2892 | #define GEN9_FREQ_SCALER 3 |
Ben Widawsky | c8735b0 | 2012-09-07 19:43:39 -0700 | [diff] [blame] | 2893 | |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 2894 | #include "i915_trace.h" |
| 2895 | |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 2896 | static inline bool intel_vtd_active(void) |
Chris Wilson | 48f112f | 2016-06-24 14:07:14 +0100 | [diff] [blame] | 2897 | { |
| 2898 | #ifdef CONFIG_INTEL_IOMMU |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 2899 | if (intel_iommu_gfx_mapped) |
Chris Wilson | 48f112f | 2016-06-24 14:07:14 +0100 | [diff] [blame] | 2900 | return true; |
| 2901 | #endif |
| 2902 | return false; |
| 2903 | } |
| 2904 | |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 2905 | static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) |
| 2906 | { |
| 2907 | return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active(); |
| 2908 | } |
| 2909 | |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2910 | static inline bool |
| 2911 | intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) |
| 2912 | { |
Chris Wilson | 80debff | 2017-05-25 13:16:12 +0100 | [diff] [blame] | 2913 | return IS_BROXTON(dev_priv) && intel_vtd_active(); |
Jon Bloomfield | 0ef34ad | 2017-05-24 08:54:11 -0700 | [diff] [blame] | 2914 | } |
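These helpers fold the "is VT-d active for graphics" question into single predicates so that workarounds can key off one place. A sketch of a caller, with the guard-page count purely illustrative:

    static unsigned int scanout_guard_pages(struct drm_i915_private *dev_priv)
    {
            /*
             * With VT-d active the display engine can prefetch past the end
             * of the scanout buffer; pad with guard pages if the workaround
             * applies (the count here is illustrative, not the real value).
             */
            return intel_scanout_needs_vtd_wa(dev_priv) ? 8 : 0;
    }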
| 2915 | |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 2916 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, |
David Weinehall | 351c3b5 | 2016-08-22 13:32:41 +0300 | [diff] [blame] | 2917 | int enable_ppgtt); |
Chris Wilson | 0e4ca10 | 2016-04-29 13:18:22 +0100 | [diff] [blame] | 2918 | |
Chris Wilson | 0673ad4 | 2016-06-24 14:00:22 +0100 | [diff] [blame] | 2919 | /* i915_drv.c */ |
Imre Deak | d15d753 | 2016-03-18 10:46:10 +0200 | [diff] [blame] | 2920 | void __printf(3, 4) |
| 2921 | __i915_printk(struct drm_i915_private *dev_priv, const char *level, |
| 2922 | const char *fmt, ...); |
| 2923 | |
| 2924 | #define i915_report_error(dev_priv, fmt, ...) \ |
| 2925 | __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) |
| 2926 | |
Ben Widawsky | c43b563 | 2012-04-16 14:07:40 -0700 | [diff] [blame] | 2927 | #ifdef CONFIG_COMPAT |
Dave Airlie | 0d6aa60 | 2006-01-02 20:14:23 +1100 | [diff] [blame] | 2928 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
| 2929 | unsigned long arg); |
Jani Nikula | 55edf41 | 2016-11-01 17:40:44 +0200 | [diff] [blame] | 2930 | #else |
| 2931 | #define i915_compat_ioctl NULL |
Ben Widawsky | c43b563 | 2012-04-16 14:07:40 -0700 | [diff] [blame] | 2932 | #endif |
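Falling back to a NULL definition when CONFIG_COMPAT is disabled lets the driver assign i915_compat_ioctl unconditionally; the usual hookup in the driver's file_operations looks roughly like the sketch below (other fields omitted):

    static const struct file_operations i915_driver_fops = {
            .owner = THIS_MODULE,
            .unlocked_ioctl = drm_ioctl,
            .compat_ioctl = i915_compat_ioctl, /* NULL without CONFIG_COMPAT */
    };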
Jani Nikula | efab069 | 2016-09-15 16:28:54 +0300 | [diff] [blame] | 2933 | extern const struct dev_pm_ops i915_pm_ops; |
| 2934 | |
| 2935 | extern int i915_driver_load(struct pci_dev *pdev, |
| 2936 | const struct pci_device_id *ent); |
| 2937 | extern void i915_driver_unload(struct drm_device *dev); |
Chris Wilson | dc97997 | 2016-05-10 14:10:04 +0100 | [diff] [blame] | 2938 | extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); |
| 2939 | extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); |
Chris Wilson | 535275d | 2017-07-21 13:32:37 +0100 | [diff] [blame] | 2940 | |
| 2941 | #define I915_RESET_QUIET BIT(0) |
| 2942 | extern void i915_reset(struct drm_i915_private *i915, unsigned int flags); |
| 2943 | extern int i915_reset_engine(struct intel_engine_cs *engine, |
| 2944 | unsigned int flags); |
| 2945 | |
Michel Thierry | 142bc7d | 2017-06-20 10:57:46 +0100 | [diff] [blame] | 2946 | extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); |
Michel Thierry | cb20a3c | 2017-10-30 11:56:14 -0700 | [diff] [blame] | 2947 | extern int intel_reset_guc(struct drm_i915_private *dev_priv); |
Michel Thierry | 6acbea8 | 2017-10-31 15:53:09 -0700 | [diff] [blame] | 2948 | extern int intel_guc_reset_engine(struct intel_guc *guc, |
| 2949 | struct intel_engine_cs *engine); |
Tomas Elf | fc0768c | 2016-03-21 16:26:59 +0000 | [diff] [blame] | 2950 | extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); |
Mika Kuoppala | 3ac168a | 2016-11-01 18:43:03 +0200 | [diff] [blame] | 2951 | extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); |
Jesse Barnes | 7648fa9 | 2010-05-20 14:28:11 -0700 | [diff] [blame] | 2952 | extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
| 2953 | extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
| 2954 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
| 2955 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
Imre Deak | 650ad97 | 2014-04-18 16:35:02 +0300 | [diff] [blame] | 2956 | int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); |
Jesse Barnes | 7648fa9 | 2010-05-20 14:28:11 -0700 | [diff] [blame] | 2957 | |
Joonas Lahtinen | 63ffbcd | 2017-04-28 10:53:36 +0300 | [diff] [blame] | 2958 | int intel_engines_init_mmio(struct drm_i915_private *dev_priv); |
Chris Wilson | bb8f0f5 | 2017-01-24 11:01:34 +0000 | [diff] [blame] | 2959 | int intel_engines_init(struct drm_i915_private *dev_priv); |
| 2960 | |
Jani Nikula | 77913b3 | 2015-06-18 13:06:16 +0300 | [diff] [blame] | 2961 | /* intel_hotplug.c */ |
Tvrtko Ursulin | 91d1425 | 2016-05-06 14:48:28 +0100 | [diff] [blame] | 2962 | void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, |
| 2963 | u32 pin_mask, u32 long_mask); |
Jani Nikula | 77913b3 | 2015-06-18 13:06:16 +0300 | [diff] [blame] | 2964 | void intel_hpd_init(struct drm_i915_private *dev_priv); |
| 2965 | void intel_hpd_init_work(struct drm_i915_private *dev_priv); |
| 2966 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); |
Rodrigo Vivi | cf53902 | 2018-01-29 15:22:21 -0800 | [diff] [blame] | 2967 | enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv, |
| 2968 | enum hpd_pin pin); |
| 2969 | enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, |
| 2970 | enum port port); |
Lyude | b236d7c8 | 2016-06-21 17:03:43 -0400 | [diff] [blame] | 2971 | bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); |
| 2972 | void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); |
Jani Nikula | 77913b3 | 2015-06-18 13:06:16 +0300 | [diff] [blame] | 2973 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2974 | /* i915_irq.c */ |
Chris Wilson | 26a02b8 | 2016-07-01 17:23:13 +0100 | [diff] [blame] | 2975 | static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) |
| 2976 | { |
| 2977 | unsigned long delay; |
| 2978 | |
Michal Wajdeczko | 4f044a8 | 2017-09-19 19:38:44 +0000 | [diff] [blame] | 2979 | if (unlikely(!i915_modparams.enable_hangcheck)) |
Chris Wilson | 26a02b8 | 2016-07-01 17:23:13 +0100 | [diff] [blame] | 2980 | return; |
| 2981 | |
| 2982 | /* Don't continually defer the hangcheck so that it is always run at |
| 2983 | * least once after work has been scheduled on any ring. Otherwise, |
| 2984 | * we will ignore a hung ring if a second ring is kept busy. |
| 2985 | */ |
| 2986 | |
| 2987 | delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); |
| 2988 | queue_delayed_work(system_long_wq, |
| 2989 | &dev_priv->gpu_error.hangcheck_work, delay); |
| 2990 | } |
| 2991 | |
Mika Kuoppala | 5817446 | 2014-02-25 17:11:26 +0200 | [diff] [blame] | 2992 | __printf(3, 4) |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 2993 | void i915_handle_error(struct drm_i915_private *dev_priv, |
| 2994 | u32 engine_mask, |
Mika Kuoppala | 5817446 | 2014-02-25 17:11:26 +0200 | [diff] [blame] | 2995 | const char *fmt, ...); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | |
Daniel Vetter | b963291 | 2014-09-30 10:56:44 +0200 | [diff] [blame] | 2997 | extern void intel_irq_init(struct drm_i915_private *dev_priv); |
Joonas Lahtinen | cefcff8 | 2017-04-28 10:58:39 +0300 | [diff] [blame] | 2998 | extern void intel_irq_fini(struct drm_i915_private *dev_priv); |
Daniel Vetter | 2aeb7d3 | 2014-09-30 10:56:43 +0200 | [diff] [blame] | 2999 | int intel_irq_install(struct drm_i915_private *dev_priv); |
| 3000 | void intel_irq_uninstall(struct drm_i915_private *dev_priv); |
Chris Wilson | 907b28c | 2013-07-19 20:36:52 +0100 | [diff] [blame] | 3001 | |
Zhi Wang | 0ad35fe | 2016-06-16 08:07:00 -0400 | [diff] [blame] | 3002 | static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) |
| 3003 | { |
Zhenyu Wang | feddf6e | 2016-10-20 17:15:03 +0800 | [diff] [blame] | 3004 | return dev_priv->gvt; |
Zhi Wang | 0ad35fe | 2016-06-16 08:07:00 -0400 | [diff] [blame] | 3005 | } |
| 3006 | |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 3007 | static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 3008 | { |
Chris Wilson | c033666 | 2016-05-06 15:40:21 +0100 | [diff] [blame] | 3009 | return dev_priv->vgpu.active; |
Yu Zhang | cf9d289 | 2015-02-10 19:05:47 +0800 | [diff] [blame] | 3010 | } |
Jesse Barnes | b1f14ad | 2011-04-06 12:13:38 -0700 | [diff] [blame] | 3011 | |
Ville Syrjälä | 6b12ca5 | 2017-09-14 18:17:31 +0300 | [diff] [blame] | 3012 | u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, |
| 3013 | enum pipe pipe); |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 3014 | void |
Jani Nikula | 50227e1 | 2014-03-31 14:27:21 +0300 | [diff] [blame] | 3015 | i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
Imre Deak | 755e901 | 2014-02-10 18:42:47 +0200 | [diff] [blame] | 3016 | u32 status_mask); |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 3017 | |
| 3018 | void |
Jani Nikula | 50227e1 | 2014-03-31 14:27:21 +0300 | [diff] [blame] | 3019 | i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
Imre Deak | 755e901 | 2014-02-10 18:42:47 +0200 | [diff] [blame] | 3020 | u32 status_mask); |
Keith Packard | 7c46358 | 2008-11-04 02:03:27 -0800 | [diff] [blame] | 3021 | |
Imre Deak | f8b79e5 | 2014-03-04 19:23:07 +0200 | [diff] [blame] | 3022 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); |
| 3023 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); |
Egbert Eich | 0706f17 | 2015-09-23 16:15:27 +0200 | [diff] [blame] | 3024 | void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, |
| 3025 | uint32_t mask, |
| 3026 | uint32_t bits); |
Ville Syrjälä | fbdedaea | 2015-11-23 18:06:16 +0200 | [diff] [blame] | 3027 | void ilk_update_display_irq(struct drm_i915_private *dev_priv, |
| 3028 | uint32_t interrupt_mask, |
| 3029 | uint32_t enabled_irq_mask); |
| 3030 | static inline void |
| 3031 | ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) |
| 3032 | { |
| 3033 | ilk_update_display_irq(dev_priv, bits, bits); |
| 3034 | } |
| 3035 | static inline void |
| 3036 | ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) |
| 3037 | { |
| 3038 | ilk_update_display_irq(dev_priv, bits, 0); |
| 3039 | } |
Ville Syrjälä | 013d375 | 2015-11-23 18:06:17 +0200 | [diff] [blame] | 3040 | void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, |
| 3041 | enum pipe pipe, |
| 3042 | uint32_t interrupt_mask, |
| 3043 | uint32_t enabled_irq_mask); |
| 3044 | static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, |
| 3045 | enum pipe pipe, uint32_t bits) |
| 3046 | { |
| 3047 | bdw_update_pipe_irq(dev_priv, pipe, bits, bits); |
| 3048 | } |
| 3049 | static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, |
| 3050 | enum pipe pipe, uint32_t bits) |
| 3051 | { |
| 3052 | bdw_update_pipe_irq(dev_priv, pipe, bits, 0); |
| 3053 | } |
Daniel Vetter | 47339cd | 2014-09-30 10:56:46 +0200 | [diff] [blame] | 3054 | void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, |
| 3055 | uint32_t interrupt_mask, |
| 3056 | uint32_t enabled_irq_mask); |
Ville Syrjälä | 1444326 | 2015-11-23 18:06:15 +0200 | [diff] [blame] | 3057 | static inline void |
| 3058 | ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) |
| 3059 | { |
| 3060 | ibx_display_interrupt_update(dev_priv, bits, bits); |
| 3061 | } |
| 3062 | static inline void |
| 3063 | ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) |
| 3064 | { |
| 3065 | ibx_display_interrupt_update(dev_priv, bits, 0); |
| 3066 | } |
| 3067 | |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3068 | /* i915_gem.c */ |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3069 | int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
| 3070 | struct drm_file *file_priv); |
| 3071 | int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
| 3072 | struct drm_file *file_priv); |
| 3073 | int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, |
| 3074 | struct drm_file *file_priv); |
| 3075 | int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 3076 | struct drm_file *file_priv); |
Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 3077 | int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
| 3078 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3079 | int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
| 3080 | struct drm_file *file_priv); |
| 3081 | int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
| 3082 | struct drm_file *file_priv); |
| 3083 | int i915_gem_execbuffer(struct drm_device *dev, void *data, |
| 3084 | struct drm_file *file_priv); |
Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3085 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
| 3086 | struct drm_file *file_priv); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3087 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
| 3088 | struct drm_file *file_priv); |
Ben Widawsky | 199adf4 | 2012-09-21 17:01:20 -0700 | [diff] [blame] | 3089 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
| 3090 | struct drm_file *file); |
| 3091 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
| 3092 | struct drm_file *file); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3093 | int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
| 3094 | struct drm_file *file_priv); |
Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 3095 | int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
| 3096 | struct drm_file *file_priv); |
Chris Wilson | 111dbca | 2017-01-10 12:10:44 +0000 | [diff] [blame] | 3097 | int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, |
| 3098 | struct drm_file *file_priv); |
| 3099 | int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, |
| 3100 | struct drm_file *file_priv); |
Chris Wilson | 8a2421b | 2017-06-16 15:05:22 +0100 | [diff] [blame] | 3101 | int i915_gem_init_userptr(struct drm_i915_private *dev_priv); |
| 3102 | void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); |
Chris Wilson | 5cc9ed4 | 2014-05-16 14:22:37 +0100 | [diff] [blame] | 3103 | int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, |
| 3104 | struct drm_file *file); |
Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 3105 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
| 3106 | struct drm_file *file_priv); |
Ben Widawsky | 23ba4fd | 2012-05-24 15:03:10 -0700 | [diff] [blame] | 3107 | int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
| 3108 | struct drm_file *file_priv); |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 3109 | void i915_gem_sanitize(struct drm_i915_private *i915); |
Tvrtko Ursulin | cb15d9f | 2016-12-01 14:16:39 +0000 | [diff] [blame] | 3110 | int i915_gem_load_init(struct drm_i915_private *dev_priv); |
| 3111 | void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); |
Imre Deak | 40ae4e1 | 2016-03-16 14:54:03 +0200 | [diff] [blame] | 3112 | void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); |
Chris Wilson | 6a800ea | 2016-09-21 14:51:07 +0100 | [diff] [blame] | 3113 | int i915_gem_freeze(struct drm_i915_private *dev_priv); |
Chris Wilson | 461fb99 | 2016-05-14 07:26:33 +0100 | [diff] [blame] | 3114 | int i915_gem_freeze_late(struct drm_i915_private *dev_priv); |
| 3115 | |
Tvrtko Ursulin | 187685c | 2016-12-01 14:16:36 +0000 | [diff] [blame] | 3116 | void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); |
Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 3117 | void i915_gem_object_free(struct drm_i915_gem_object *obj); |
Chris Wilson | 37e680a | 2012-06-07 15:38:42 +0100 | [diff] [blame] | 3118 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
| 3119 | const struct drm_i915_gem_object_ops *ops); |
Tvrtko Ursulin | 12d79d7 | 2016-12-01 14:16:37 +0000 | [diff] [blame] | 3120 | struct drm_i915_gem_object * |
| 3121 | i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); |
| 3122 | struct drm_i915_gem_object * |
| 3123 | i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, |
| 3124 | const void *data, size_t size); |
Chris Wilson | b1f788c | 2016-08-04 07:52:45 +0100 | [diff] [blame] | 3125 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3126 | void i915_gem_free_object(struct drm_gem_object *obj); |
Chris Wilson | 42dcedd | 2012-11-15 11:32:30 +0000 | [diff] [blame] | 3127 | |
Chris Wilson | bdeb978 | 2016-12-23 14:57:56 +0000 | [diff] [blame] | 3128 | static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) |
| 3129 | { |
| 3130 | /* A single pass should suffice to release all the freed objects (along |
| 3131 | * most call paths), but be a little more paranoid in that freeing |
| 3132 | * the objects does take a small amount of time, during which the RCU |
| 3133 | * callbacks could have added new objects into the freed list, and |
| 3134 | * armed the work again. |
| 3135 | */ |
| 3136 | do { |
| 3137 | rcu_barrier(); |
| 3138 | } while (flush_work(&i915->mm.free_work)); |
| 3139 | } |
| 3140 | |
Chris Wilson | 3b19f16 | 2017-07-18 14:41:24 +0100 | [diff] [blame] | 3141 | static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) |
| 3142 | { |
| 3143 | /* |
| 3144 | * Similar to the objects above (see i915_gem_drain_freed_objects()), in |
| 3145 | * general we have workers that are armed by RCU and then rearm |
| 3146 | * themselves in their callbacks. To be paranoid, we need to |
| 3147 | * drain the workqueue a second time after waiting for the RCU |
| 3148 | * grace period so that we catch work queued via RCU from the first |
| 3149 | * pass. As neither drain_workqueue() nor flush_workqueue() report |
| 3150 | * a result, we assume that no more than two passes are required |
| 3151 | * to catch all recursive RCU delayed work. |
| 3153 | */ |
| 3154 | int pass = 2; |
| 3155 | do { |
| 3156 | rcu_barrier(); |
| 3157 | drain_workqueue(i915->wq); |
| 3158 | } while (--pass); |
| 3159 | } |
| 3160 | |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 3161 | struct i915_vma * __must_check |
Joonas Lahtinen | ec7adb6 | 2015-03-16 14:11:13 +0200 | [diff] [blame] | 3162 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
| 3163 | const struct i915_ggtt_view *view, |
Chris Wilson | 91b2db6 | 2016-08-04 16:32:23 +0100 | [diff] [blame] | 3164 | u64 size, |
Chris Wilson | 2ffffd0 | 2016-08-04 16:32:22 +0100 | [diff] [blame] | 3165 | u64 alignment, |
| 3166 | u64 flags); |
Tvrtko Ursulin | fe14d5f | 2014-12-10 17:27:58 +0000 | [diff] [blame] | 3167 | |
Chris Wilson | aa653a6 | 2016-08-04 07:52:27 +0100 | [diff] [blame] | 3168 | int i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3169 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3170 | |
Chris Wilson | 7c108fd | 2016-10-24 13:42:18 +0100 | [diff] [blame] | 3171 | void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); |
| 3172 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3173 | static inline int __sg_page_count(const struct scatterlist *sg) |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 3174 | { |
Chris Wilson | ee28637 | 2015-04-07 16:20:25 +0100 | [diff] [blame] | 3175 | return sg->length >> PAGE_SHIFT; |
Chris Wilson | 9da3da6 | 2012-06-01 15:20:22 +0100 | [diff] [blame] | 3176 | } |
Chris Wilson | ee28637 | 2015-04-07 16:20:25 +0100 | [diff] [blame] | 3177 | |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 3178 | struct scatterlist * |
| 3179 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj, |
| 3180 | unsigned int n, unsigned int *offset); |
| 3181 | |
Dave Gordon | 033908a | 2015-12-10 18:51:23 +0000 | [diff] [blame] | 3182 | struct page * |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 3183 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, |
| 3184 | unsigned int n); |
Dave Gordon | 033908a | 2015-12-10 18:51:23 +0000 | [diff] [blame] | 3185 | |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 3186 | struct page * |
| 3187 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, |
| 3188 | unsigned int n); |
Chris Wilson | 341be1c | 2016-06-10 14:23:00 +0530 | [diff] [blame] | 3189 | |
Chris Wilson | 96d7763 | 2016-10-28 13:58:33 +0100 | [diff] [blame] | 3190 | dma_addr_t |
| 3191 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, |
| 3192 | unsigned long n); |
Chris Wilson | ee28637 | 2015-04-07 16:20:25 +0100 | [diff] [blame] | 3193 | |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 3194 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, |
Matthew Auld | a5c08166 | 2017-10-06 23:18:18 +0100 | [diff] [blame] | 3195 | struct sg_table *pages, |
Matthew Auld | 84e8978 | 2017-10-09 12:00:24 +0100 | [diff] [blame] | 3196 | unsigned int sg_page_sizes); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3197 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
| 3198 | |
| 3199 | static inline int __must_check |
| 3200 | i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 3201 | { |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3202 | might_lock(&obj->mm.lock); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3203 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3204 | if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3205 | return 0; |
| 3206 | |
| 3207 | return __i915_gem_object_get_pages(obj); |
| 3208 | } |
| 3209 | |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 3210 | static inline bool |
| 3211 | i915_gem_object_has_pages(struct drm_i915_gem_object *obj) |
| 3212 | { |
| 3213 | return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); |
| 3214 | } |
| 3215 | |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3216 | static inline void |
| 3217 | __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
| 3218 | { |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 3219 | GEM_BUG_ON(!i915_gem_object_has_pages(obj)); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3220 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3221 | atomic_inc(&obj->mm.pages_pin_count); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3222 | } |
| 3223 | |
| 3224 | static inline bool |
| 3225 | i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) |
| 3226 | { |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3227 | return atomic_read(&obj->mm.pages_pin_count); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3228 | } |
| 3229 | |
| 3230 | static inline void |
| 3231 | __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) |
| 3232 | { |
Chris Wilson | f1fa4f4 | 2017-10-13 21:26:13 +0100 | [diff] [blame] | 3233 | GEM_BUG_ON(!i915_gem_object_has_pages(obj)); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3234 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3235 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3236 | atomic_dec(&obj->mm.pages_pin_count); |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 3237 | } |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3238 | |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3239 | static inline void |
| 3240 | i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 3241 | { |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3242 | __i915_gem_object_unpin_pages(obj); |
Chris Wilson | a557017 | 2012-09-04 21:02:54 +0100 | [diff] [blame] | 3243 | } |
| 3244 | |
Chris Wilson | 548625e | 2016-11-01 12:11:34 +0000 | [diff] [blame] | 3245 | enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ |
| 3246 | I915_MM_NORMAL = 0, |
| 3247 | I915_MM_SHRINKER |
| 3248 | }; |
| 3249 | |
| 3250 | void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, |
| 3251 | enum i915_mm_subclass subclass); |
Chris Wilson | 03ac84f | 2016-10-28 13:58:36 +0100 | [diff] [blame] | 3252 | void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); |
Chris Wilson | a4f5ea6 | 2016-10-28 13:58:35 +0100 | [diff] [blame] | 3253 | |
Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 3254 | enum i915_map_type { |
| 3255 | I915_MAP_WB = 0, |
| 3256 | I915_MAP_WC, |
Chris Wilson | a575c67 | 2017-08-28 11:46:31 +0100 | [diff] [blame] | 3257 | #define I915_MAP_OVERRIDE BIT(31) |
| 3258 | I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, |
| 3259 | I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, |
Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 3260 | }; |
| 3261 | |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3262 | /** |
| 3263 | * i915_gem_object_pin_map - return a contiguous mapping of the entire object |
Chris Wilson | a73c7a4 | 2016-12-31 11:20:10 +0000 | [diff] [blame] | 3264 | * @obj: the object to map into kernel address space |
| 3265 | * @type: the type of mapping, used to select pgprot_t |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3266 | * |
| 3267 | * Calls i915_gem_object_pin_pages() to prevent reaping of the object's |
| 3268 | * pages and then returns a contiguous mapping of the backing storage into |
Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 3269 | * the kernel address space. Based on the @type of mapping, the PTE will be |
| 3270 | * set to either WriteBack or WriteCombine (via pgprot_t). |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3271 | * |
Chris Wilson | 1233e2d | 2016-10-28 13:58:37 +0100 | [diff] [blame] | 3272 | * The caller is responsible for calling i915_gem_object_unpin_map() when the |
| 3273 | * mapping is no longer required. |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3274 | * |
Dave Gordon | 8305216 | 2016-04-12 14:46:16 +0100 | [diff] [blame] | 3275 | * Returns the pointer through which to access the mapped object, or an |
| 3276 | * ERR_PTR() on error. |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3277 | */ |
Chris Wilson | d31d7cb | 2016-08-12 12:39:58 +0100 | [diff] [blame] | 3278 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, |
| 3279 | enum i915_map_type type); |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3280 | |
| 3281 | /** |
| 3282 | * i915_gem_object_unpin_map - releases an earlier mapping |
Chris Wilson | a73c7a4 | 2016-12-31 11:20:10 +0000 | [diff] [blame] | 3283 | * @obj: the object to unmap |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3284 | * |
| 3285 | * After pinning the object and mapping its pages, and once you are finished |
| 3286 | * with your access, call i915_gem_object_unpin_map() to release the pin |
| 3287 | * on the mapping. Once the pin count reaches zero, that mapping may be |
| 3288 | * removed. |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3289 | */ |
| 3290 | static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) |
| 3291 | { |
Chris Wilson | 0a798eb | 2016-04-08 12:11:11 +0100 | [diff] [blame] | 3292 | i915_gem_object_unpin_pages(obj); |
| 3293 | } |
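Putting the pair together, kernel-space access to an object's contents follows a pin-map/use/unpin-map pattern. A sketch, assuming the caller holds whatever locking the object requires and knows that len fits within the object:

    static int copy_into_object(struct drm_i915_gem_object *obj,
                                const void *src, size_t len)
    {
            void *vaddr;

            vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);

            memcpy(vaddr, src, len);

            i915_gem_object_unpin_map(obj);
            return 0;
    }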
| 3294 | |
Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 3295 | int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, |
| 3296 | unsigned int *needs_clflush); |
| 3297 | int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, |
| 3298 | unsigned int *needs_clflush); |
Chris Wilson | 7f5f95d | 2017-03-10 00:09:42 +0000 | [diff] [blame] | 3299 | #define CLFLUSH_BEFORE BIT(0) |
| 3300 | #define CLFLUSH_AFTER BIT(1) |
| 3301 | #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) |
Chris Wilson | 43394c7 | 2016-08-18 17:16:47 +0100 | [diff] [blame] | 3302 | |
| 3303 | static inline void |
| 3304 | i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) |
| 3305 | { |
| 3306 | i915_gem_object_unpin_pages(obj); |
| 3307 | } |
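The prepare/finish pair brackets direct CPU access to shmem-backed pages, with needs_clflush telling the caller whether to flush around the access. A simplified read-path sketch (drm_clflush_virt_range() is from drm_cache.h):

    static int read_first_page(struct drm_i915_gem_object *obj, void *dst)
    {
            unsigned int needs_clflush;
            struct page *page;
            void *vaddr;
            int ret;

            ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
            if (ret)
                    return ret;

            page = i915_gem_object_get_page(obj, 0);
            vaddr = kmap(page);
            if (needs_clflush)
                    drm_clflush_virt_range(vaddr, PAGE_SIZE);
            memcpy(dst, vaddr, PAGE_SIZE);
            kunmap(page);

            i915_gem_obj_finish_shmem_access(obj);
            return 0;
    }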
| 3308 | |
Chris Wilson | 54cf91d | 2010-11-25 18:00:26 +0000 | [diff] [blame] | 3309 | int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
Ben Widawsky | e2d05a8 | 2013-09-24 09:57:58 -0700 | [diff] [blame] | 3310 | void i915_vma_move_to_active(struct i915_vma *vma, |
Chris Wilson | 5cf3d28 | 2016-08-04 07:52:43 +0100 | [diff] [blame] | 3311 | struct drm_i915_gem_request *req, |
| 3312 | unsigned int flags); |
Dave Airlie | ff72145b | 2011-02-07 12:16:14 +1000 | [diff] [blame] | 3313 | int i915_gem_dumb_create(struct drm_file *file_priv, |
| 3314 | struct drm_device *dev, |
| 3315 | struct drm_mode_create_dumb *args); |
Dave Airlie | da6b51d | 2014-12-24 13:11:17 +1000 | [diff] [blame] | 3316 | int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, |
| 3317 | uint32_t handle, uint64_t *offset); |
Chris Wilson | 4cc6907 | 2016-08-25 19:05:19 +0100 | [diff] [blame] | 3318 | int i915_gem_mmap_gtt_version(void); |
Dave Gordon | 85d1225 | 2016-05-20 11:54:06 +0100 | [diff] [blame] | 3319 | |
| 3320 | void i915_gem_track_fb(struct drm_i915_gem_object *old, |
| 3321 | struct drm_i915_gem_object *new, |
| 3322 | unsigned frontbuffer_bits); |
| 3323 | |
Chris Wilson | 73cb970 | 2016-10-28 13:58:46 +0100 | [diff] [blame] | 3324 | int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); |
Chris Wilson | 1690e1e | 2011-12-14 13:57:08 +0100 | [diff] [blame] | 3325 | |
Chris Wilson | 8d9fc7f | 2014-02-25 17:11:23 +0200 | [diff] [blame] | 3326 | struct drm_i915_gem_request * |
Tvrtko Ursulin | 0bc40be | 2016-03-16 11:00:37 +0000 | [diff] [blame] | 3327 | i915_gem_find_active_request(struct intel_engine_cs *engine); |
Chris Wilson | 8d9fc7f | 2014-02-25 17:11:23 +0200 | [diff] [blame] | 3328 | |
Chris Wilson | 67d97da | 2016-07-04 08:08:31 +0100 | [diff] [blame] | 3329 | void i915_gem_retire_requests(struct drm_i915_private *dev_priv); |
Sourab Gupta | 84c33a6 | 2014-06-02 16:47:17 +0530 | [diff] [blame] | 3330 | |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 3331 | static inline bool i915_reset_backoff(struct i915_gpu_error *error) |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 3332 | { |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 3333 | return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); |
| 3334 | } |
| 3335 | |
| 3336 | static inline bool i915_reset_handoff(struct i915_gpu_error *error) |
| 3337 | { |
| 3338 | return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags)); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 3339 | } |
| 3340 | |
| 3341 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
| 3342 | { |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 3343 | return unlikely(test_bit(I915_WEDGED, &error->flags)); |
| 3344 | } |
| 3345 | |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 3346 | static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error) |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 3347 | { |
Chris Wilson | 8c185ec | 2017-03-16 17:13:02 +0000 | [diff] [blame] | 3348 | return i915_reset_backoff(error) | i915_terminally_wedged(error); |
Mika Kuoppala | 2ac0f45 | 2013-11-12 14:44:19 +0200 | [diff] [blame] | 3349 | } |
| 3350 | |
| 3351 | static inline u32 i915_reset_count(struct i915_gpu_error *error) |
| 3352 | { |
Chris Wilson | 8af29b0 | 2016-09-09 14:11:47 +0100 | [diff] [blame] | 3353 | return READ_ONCE(error->reset_count); |
Daniel Vetter | 1f83fee | 2012-11-15 17:17:22 +0100 | [diff] [blame] | 3354 | } |
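These wedged/backoff predicates are cheap test_bit() checks, so hot paths can poll them before queueing work; e.g. failing fast when the GPU is terminally wedged:

    static int check_not_wedged(struct drm_i915_private *dev_priv)
    {
            if (i915_terminally_wedged(&dev_priv->gpu_error))
                    return -EIO;    /* no point submitting to a dead GPU */

            return 0;
    }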
Chris Wilson | a71d8d9 | 2012-02-15 11:25:36 +0000 | [diff] [blame] | 3355 | |
Michel Thierry | 702c8f8 | 2017-06-20 10:57:48 +0100 | [diff] [blame] | 3356 | static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, |
| 3357 | struct intel_engine_cs *engine) |
| 3358 | { |
| 3359 | return READ_ONCE(error->reset_engine_count[engine->id]); |
| 3360 | } |
| 3361 | |
Michel Thierry | a1ef70e | 2017-06-20 10:57:47 +0100 | [diff] [blame] | 3362 | struct drm_i915_gem_request * |
| 3363 | i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); |
Chris Wilson | 0e178ae | 2017-01-17 17:59:06 +0200 | [diff] [blame] | 3364 | int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); |
Chris Wilson | d802709 | 2017-02-08 14:30:32 +0000 | [diff] [blame] | 3365 | void i915_gem_reset(struct drm_i915_private *dev_priv); |
Michel Thierry | a1ef70e | 2017-06-20 10:57:47 +0100 | [diff] [blame] | 3366 | void i915_gem_reset_finish_engine(struct intel_engine_cs *engine); |
Chris Wilson | b1ed35d | 2017-01-04 14:51:10 +0000 | [diff] [blame] | 3367 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv); |
Chris Wilson | 821ed7d | 2016-09-09 14:11:53 +0100 | [diff] [blame] | 3368 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv); |
Chris Wilson | 2e8f9d3 | 2017-03-16 17:13:04 +0000 | [diff] [blame] | 3369 | bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); |
Michel Thierry | a1ef70e | 2017-06-20 10:57:47 +0100 | [diff] [blame] | 3370 | void i915_gem_reset_engine(struct intel_engine_cs *engine, |
| 3371 | struct drm_i915_gem_request *request); |
Chris Wilson | 57822dc | 2017-02-22 11:40:48 +0000 | [diff] [blame] | 3372 | |
Chris Wilson | 2414551 | 2017-01-24 11:01:35 +0000 | [diff] [blame] | 3373 | void i915_gem_init_mmio(struct drm_i915_private *i915); |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 3374 | int __must_check i915_gem_init(struct drm_i915_private *dev_priv); |
| 3375 | int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); |
Tvrtko Ursulin | c6be607 | 2016-11-16 08:55:31 +0000 | [diff] [blame] | 3376 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); |
Tvrtko Ursulin | cb15d9f | 2016-12-01 14:16:39 +0000 | [diff] [blame] | 3377 | void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); |
Chris Wilson | 496b575 | 2017-02-13 17:15:58 +0000 | [diff] [blame] | 3378 | int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, |
| 3379 | unsigned int flags); |
Tvrtko Ursulin | bf9e842 | 2016-12-01 14:16:38 +0000 | [diff] [blame] | 3380 | int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); |
| 3381 | void i915_gem_resume(struct drm_i915_private *dev_priv); |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 3382 | int i915_gem_fault(struct vm_fault *vmf); |
Chris Wilson | e95433c | 2016-10-28 13:58:27 +0100 | [diff] [blame] | 3383 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, |
| 3384 | unsigned int flags, |
| 3385 | long timeout, |
| 3386 | struct intel_rps_client *rps); |
Chris Wilson | 6b5e90f | 2016-11-14 20:41:05 +0000 | [diff] [blame] | 3387 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, |
| 3388 | unsigned int flags, |
| 3389 | int priority); |
| 3390 | #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX |
| 3391 | |
Chris Wilson | 2e2f351 | 2015-04-27 13:41:14 +0100 | [diff] [blame] | 3392 | int __must_check |
Chris Wilson | e22d8e3 | 2017-04-12 12:01:11 +0100 | [diff] [blame] | 3393 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); |
| 3394 | int __must_check |
| 3395 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); |
Chris Wilson | 2021746 | 2010-11-23 15:26:33 +0000 | [diff] [blame] | 3396 | int __must_check |
Chris Wilson | dabdfe0 | 2012-03-26 10:10:27 +0200 | [diff] [blame] | 3397 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 3398 | struct i915_vma * __must_check |
Chris Wilson | 2da3b9b | 2011-04-14 09:41:17 +0100 | [diff] [blame] | 3399 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
| 3400 | u32 alignment, |
Tvrtko Ursulin | e661733 | 2015-03-23 11:10:33 +0000 | [diff] [blame] | 3401 | const struct i915_ggtt_view *view); |
Chris Wilson | 058d88c | 2016-08-15 10:49:06 +0100 | [diff] [blame] | 3402 | void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); |
Chris Wilson | 0073115 | 2014-05-21 12:42:56 +0100 | [diff] [blame] | 3403 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, |
Chris Wilson | 6eeefaf | 2010-08-07 11:01:39 +0100 | [diff] [blame] | 3404 | int align); |
Chris Wilson | 829a0af | 2017-06-20 12:05:45 +0100 | [diff] [blame] | 3405 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); |
Chris Wilson | 05394f3 | 2010-11-08 19:18:58 +0000 | [diff] [blame] | 3406 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3407 | |
Chris Wilson | e4ffd17 | 2011-04-04 09:44:39 +0100 | [diff] [blame] | 3408 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
| 3409 | enum i915_cache_level cache_level); |
| 3410 | |
Daniel Vetter | 1286ff7 | 2012-05-10 15:25:09 +0200 | [diff] [blame] | 3411 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, |
| 3412 | struct dma_buf *dma_buf); |
| 3413 | |
| 3414 | struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
| 3415 | struct drm_gem_object *gem_obj, int flags); |
| 3416 | |
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	return container_of(vm, struct i915_hw_ppgtt, base);
}

/* i915_gem_fence_reg.c */
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);

void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);

void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				       struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
					 struct sg_table *pages);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
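
/*
 * Illustrative usage sketch (not lifted verbatim from any caller): on
 * success the caller owns a reference and must drop it when done, e.g.
 * with i915_gem_context_put(). kref_get_unless_zero() is what makes
 * the RCU lookup safe against a concurrent final unref.
 *
 *	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 *	if (!ctx)
 *		return -ENOENT;
 *	...
 *	i915_gem_context_put(ctx);
 */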

static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	return &vm->timeline.engine[engine->id];
}

int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file);
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
			    uint32_t *reg_state);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}
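
/*
 * Note: the wmb() above orders any prior CPU writes (e.g. through WC or
 * GTT mappings) ahead of the flush; on gen < 6 the chipset-level write
 * buffer must additionally be drained by hand, hence the explicit
 * intel_gtt_chipset_flush().
 */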

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *i915,
			      unsigned long target,
			      unsigned long *nr_scanned,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
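/*
 * Loose summary of the flags (see i915_gem_shrinker.c for the
 * authoritative semantics): PURGEABLE limits the scan to objects marked
 * I915_MADV_DONTNEED, UNBOUND/BOUND select objects without/with GTT
 * bindings, ACTIVE permits reaping objects still in use by the GPU, and
 * VMAPS also recovers kernel vmap address space.
 */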
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);


/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_perf.c */
extern void i915_perf_init(struct drm_i915_private *dev_priv);
extern void i915_perf_fini(struct drm_i915_private *dev_priv);
extern void i915_perf_register(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister(struct drm_i915_private *dev_priv);

/* i915_suspend.c */
extern int i915_save_state(struct drm_i915_private *dev_priv);
extern int i915_restore_state(struct drm_i915_private *dev_priv);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_lpe_audio.c */
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
			    enum pipe pipe, enum port port,
			    const void *eld, int ls_clock, bool dp_output);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);

/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}
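
/*
 * Note: the const is cast away on purpose so that the otherwise
 * read-only device info can still be adjusted during early driver
 * initialisation; it is not intended for use once init has completed.
 */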

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
				       bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
				    u32 val, int fast_timeout_us,
				    int slow_timeout_ms);
#define sandybridge_pcode_write(dev_priv, mbox, val)	\
	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)

int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);

static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
					 const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
	u32 upper, lower, old_upper, loop = 0; \
	upper = I915_READ(upper_reg); \
	do { \
		old_upper = upper; \
		lower = I915_READ(lower_reg); \
		upper = I915_READ(upper_reg); \
	} while (upper != old_upper && loop++ < 2); \
	(u64)upper << 32 | lower; })
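
/*
 * Illustrative example (the register pair is just a plausible
 * candidate): sample a 64-bit timestamp split across two 32-bit
 * registers. The retry loop above re-reads the upper half until it is
 * stable, so a carry between the two reads cannot yield a torn value.
 *
 *	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(engine->mmio_base),
 *				  RING_TIMESTAMP_UDW(engine->mmio_base));
 */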

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
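
/*
 * A minimal sketch of the pattern described above, assuming the
 * register does need forcewake and no more localised lock applies:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	I915_WRITE_FW(reg, val);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */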

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

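/*
 * The current tick may already be almost complete when a timeout is
 * armed, so the helpers below add one jiffy to guarantee the wait is at
 * least as long as requested (clamped to MAX_JIFFY_OFFSET).
 */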
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

static inline bool
__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 seqno;

	/* Note that the engine may have wrapped around the seqno, and
	 * so our request->global_seqno will be ahead of the hardware,
	 * even though it completed the request before wrapping. We catch
	 * this by kicking all the waiters before resetting the seqno
	 * in hardware, and also signal the fence.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
		return true;

	/* The request was dequeued before we were awoken. We check after
	 * inspecting the hw to confirm that this was the same request
	 * that generated the HWS update. The memory barriers within
	 * the request execution are sufficient to ensure that a check
	 * after reading the value from hw matches this request.
	 */
	seqno = i915_gem_request_global_seqno(req);
	if (!seqno)
		return false;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req, seqno))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		spin_lock_irq(&b->irq_lock);
		if (b->irq_wait && b->irq_wait->tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whomever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourself.
			 */
			wake_up_process(b->irq_wait->tsk);
		spin_unlock_irq(&b->irq_lock);

		if (__i915_gem_request_completed(req, seqno))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets, page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)
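
/*
 * Example (illustrative): accelerated copy out of a write-combined
 * mapping, with a plain-memcpy fallback when the alignment or SSE4.1
 * requirements are not met:
 *
 *	if (!i915_memcpy_from_wc(dst, src, len))
 *		memcpy(dst, src, len);
 */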
| 4088 | |
Chris Wilson | c58305a | 2016-08-19 16:54:28 +0100 | [diff] [blame] | 4089 | /* i915_mm.c */ |
| 4090 | int remap_io_mapping(struct vm_area_struct *vma, |
| 4091 | unsigned long addr, unsigned long pfn, unsigned long size, |
| 4092 | struct io_mapping *iomap); |
| 4093 | |
Chris Wilson | 767a983 | 2017-09-13 09:56:05 +0100 | [diff] [blame] | 4094 | static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) |
| 4095 | { |
| 4096 | if (INTEL_GEN(i915) >= 10) |
| 4097 | return CNL_HWS_CSB_WRITE_INDEX; |
| 4098 | else |
| 4099 | return I915_HWS_CSB_WRITE_INDEX; |
| 4100 | } |
| 4101 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4102 | #endif |