/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <linux/xarray.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"

#include "intel_region_lmem.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20200515"
#define DRIVER_TIMESTAMP	1589543364

struct drm_i915_gem_object;

/*
 * The code assumes that the hpd_pins below have consecutive values and
 * starting with HPD_PORT_A, the HPD pin associated with any port can be
 * retrieved by adding the corresponding port (or phy) enum value to
 * HPD_PORT_A in most cases. For example:
 * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
 */
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_F,
	HPD_PORT_G,
	HPD_PORT_H,
	HPD_PORT_I,

	HPD_NUM_PINS
};
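
/*
 * Illustrative sketch of the consecutive-value assumption documented
 * above (a hypothetical helper added here for clarity, not one the
 * driver defines): mapping a phy to its HPD pin is plain enum
 * arithmetic.
 *
 *	static inline enum hpd_pin hpd_pin_for_phy(enum phy phy)
 *	{
 *		return HPD_PORT_A + (phy - PHY_A);
 *	}
 */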

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
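
/*
 * Example usage (illustrative only): iterating every pin, e.g. to reset
 * the per-pin hotplug statistics kept in struct i915_hotplug below.
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */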

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
	struct delayed_work hotplug_work;

	const u32 *hpd, *pch_hpd;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	u32 retry_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD work could block the workqueue on acquiring a mode
	 * config mutex that userspace may have taken, while userspace is
	 * in turn waiting on the DP workqueue to run, which is blocked
	 * behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};
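
/*
 * Sketch of how the storm fields above fit together (simplified and
 * hypothetical; the one second window is an assumption made here for
 * illustration, the real logic lives in intel_hotplug.c): each HPD irq
 * bumps the per-pin count inside a time window, and crossing the
 * threshold marks the pin for disabling.
 *
 *	if (time_after(jiffies, hotplug->stats[pin].last_jiffies +
 *				msecs_to_jiffies(1000))) {
 *		hotplug->stats[pin].last_jiffies = jiffies;
 *		hotplug->stats[pin].count = 0;
 *	}
 *
 *	if (++hotplug->stats[pin].count > hotplug->hpd_storm_threshold)
 *		hotplug->stats[pin].state = HPD_MARK_DISABLED;
 */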

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	union {
		struct drm_file *file;
		struct rcu_head rcu;
	};

	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;

	struct xarray context_xa;
	struct xarray vm_xa;

	unsigned int bsd_engine;

/*
 * Every context ban increments the per-client ban score. Hangs in
 * short succession also increment the ban score. If the ban threshold
 * is reached, the client is considered banned and submitting more work
 * will fail. This is a stop gap measure to limit a badly behaving
 * client's access to the gpu. Note that unbannable contexts never
 * increment the client ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN	3
#define I915_CLIENT_SCORE_BANNED	9
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};
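
/*
 * Sketch of the ban accounting described above (a hypothetical helper,
 * not the driver's exact code): a fast hang adds
 * I915_CLIENT_SCORE_HANG_FAST, a context ban adds
 * I915_CLIENT_SCORE_CONTEXT_BAN, and a client counts as banned once the
 * accumulated score reaches I915_CLIENT_SCORE_BANNED.
 *
 *	static inline bool
 *	i915_client_is_banned(struct drm_i915_file_private *file_priv)
 *	{
 *		return atomic_read(&file_priv->ban_score) >=
 *		       I915_CLIENT_SCORE_BANNED;
 *	}
 */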

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_cdclk_config;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
	int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc *crtc);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
	u8 (*calc_voltage_level)(int cdclk);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
	void (*commit_modeset_enables)(struct intel_atomic_state *state);
	void (*commit_modeset_disables)(struct intel_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*color_check)(struct intel_crtc_state *crtc_state);
	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
	void (*read_luts)(struct intel_crtc_state *crtc_state);
};

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	u32 required_version;
	u32 max_fw_size; /* bytes */
	u32 *dmc_payload;
	u32 dmc_fw_size; /* dwords */
	u32 version;
	u32 mmio_count;
	i915_reg_t mmioaddr[20];
	u32 mmiodata[20];
	u32 dc_state;
	u32 target_dc_state;
	u32 allowed_dc_mask;
	intel_wakeref_t wakeref;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool active;
	bool activated;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			u32 hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			u16 pixel_blend_mode;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
			u64 modifier;
		} fb;

		unsigned int fence_y_offset;
		u16 gen9_wa_cfb_stride;
		s8 fence_id;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
			u64 modifier;
		} fb;

		int cfb_size;
		unsigned int fence_y_offset;
		u16 gen9_wa_cfb_stride;
		s8 fence_id;
		bool plane_visible;
	} params;

	const char *no_fbc_reason;
};
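
/*
 * Illustrative sketch of the check described in the reg_params comment
 * above (an assumption about the shape, not the exact helper in
 * intel_fbc.c): provided the struct is zeroed before being filled, so
 * that padding bytes compare equal, deciding whether FBC needs a full
 * disable/re-enable cycle reduces to comparing the cached parameter
 * blocks.
 *
 *	static inline bool
 *	intel_fbc_reg_params_equal(const struct intel_fbc_reg_params *a,
 *				   const struct intel_fbc_reg_params *b)
 *	{
 *		return memcmp(a, b, sizeof(*a)) == 0;
 *	}
 */
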
/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK	0x0f
#define I915_PSR_DEBUG_DEFAULT		0x00
#define I915_PSR_DEBUG_DISABLE		0x01
#define I915_PSR_DEBUG_ENABLE		0x02
#define I915_PSR_DEBUG_FORCE_PSR1	0x03
#define I915_PSR_DEBUG_IRQ		0x10

	u32 debug;
	bool sink_support;
	bool enabled;
	struct intel_dp *dp;
	enum pipe pipe;
	enum transcoder transcoder;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
	bool sink_psr2_support;
	bool link_standby;
	bool colorimetry_support;
	bool psr2_enabled;
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
	u16 su_x_granularity;
	bool dc3co_enabled;
	u32 dc3co_exit_delay;
	struct delayed_work dc3co_work;
	bool force_mode_changed;
	struct drm_dp_vsc_sdp vsc;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * Small stash of WC pages
	 */
	struct pagestash wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}
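
/*
 * Example usage (illustrative; the dma_fence_wait_timeout() plumbing is
 * an assumption for this sketch, not code from this header): anonymous
 * waits take the default timeout above, while context-bound waits can
 * pass the context id to i915_fence_context_timeout() instead.
 *
 *	long timeout = i915_fence_timeout(i915);
 *
 *	timeout = dma_fence_wait_timeout(fence, true, timeout);
 *	if (timeout < 0)
 *		return timeout;		(interrupted)
 *	if (timeout == 0)
 *		return -ETIME;		(expired)
 */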

/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

struct ddi_vbt_port_info {
	/* Non-NULL if port present. */
	const struct child_device_config *child;

	int max_tmds_clock;

	/* This is an index in the HDMI/DVI DDI buffer translation table. */
	u8 hdmi_level_shift;
	u8 hdmi_level_shift_set:1;

	u8 supports_dvi:1;
	u8 supports_hdmi:1;
	u8 supports_dp:1;
	u8 supports_edp:1;
	u8 supports_typec_usb:1;
	u8 supports_tbt:1;

	u8 alternate_aux_channel;
	u8 alternate_ddc_pin;

	u8 dp_boost_level;
	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	struct list_head display_devices;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct ilk_wm_values {
	u32 wm_pipe[3];
	u32 wm_lp[3];
	u32 wm_lp_spr[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	u16 plane[I915_MAX_PLANES];
	u16 fbc;
};

struct g4x_sr_wm {
	u16 plane;
	u16 cursor;
	u16 fbc;
};

struct vlv_wm_ddl_values {
	u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	u8 level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
};

struct intel_cdclk_config {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct i915_selftest_stash {
	atomic_t counter;
};

struct drm_i915_private {
	struct drm_device drm;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	u32 hsw_psr_mmio_adjust;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* The current hardware cdclk configuration */
		struct intel_cdclk_config hw;

		/* cdclk, divider, and ratio table from bspec */
		const struct intel_cdclk_vals *table;

		struct intel_global_obj obj;
	} cdclk;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;
	/* unbound hipri wq for page flips/plane updates */
	struct workqueue_struct *flip_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

	/**
	 * dpll and cdclk state is protected by connection_mutex;
	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * It must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct {
		struct mutex lock;

		int num_shared_dpll;
		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
		const struct intel_dpll_mgr *mgr;

		struct {
			int nssc;
			int ssc;
		} ref_clks;
	} dpll;

	struct list_head global_obj_list;

	/*
	 * For reading active_pipes, holding any crtc lock is
	 * sufficient; for writing, all of them must be held.
	 */
	u8 active_pipes;

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_wa_list gt_wa_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct mutex av_mutex;
	int audio_power_refcount;
	u32 audio_freq_cntrl;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
Imre Deakadc7f042016-04-04 17:27:10 +03001075 u32 bxt_phy_grc;
Ville Syrjälä70722462015-04-10 18:21:28 +03001076
Daniel Vetter842f1c82014-03-10 10:01:44 +01001077 u32 suspend_count;
Imre Deak0f906032018-03-22 16:36:42 +02001078 bool power_domains_suspended;
Daniel Vetterf4c956a2012-11-02 19:55:02 +01001079 struct i915_suspend_saved_registers regfile;
Daniele Ceraolo Spurio1bcd8682019-08-19 19:01:46 -07001080 struct vlv_s0ix_state *vlv_s0ix_state;
Daniel Vetter231f42a2012-11-02 19:55:05 +01001081
Lyude656d1b82016-08-17 15:55:54 -04001082 enum {
Paulo Zanoni16dcdc42016-09-22 18:00:27 -03001083 I915_SAGV_UNKNOWN = 0,
1084 I915_SAGV_DISABLED,
1085 I915_SAGV_ENABLED,
1086 I915_SAGV_NOT_CONTROLLED
1087 } sagv_status;
Lyude656d1b82016-08-17 15:55:54 -04001088
James Ausmusb068a862019-10-09 10:23:14 -07001089 u32 sagv_block_time_us;
1090
Ville Syrjälä53615a52013-08-01 16:18:50 +03001091 struct {
1092 /*
1093 * Raw watermark latency values:
1094 * in 0.1us units for WM0,
1095 * in 0.5us units for WM1+.
1096 */
1097 /* primary */
Jani Nikula143c3352019-01-18 14:01:24 +02001098 u16 pri_latency[5];
Ville Syrjälä53615a52013-08-01 16:18:50 +03001099 /* sprite */
Jani Nikula143c3352019-01-18 14:01:24 +02001100 u16 spr_latency[5];
Ville Syrjälä53615a52013-08-01 16:18:50 +03001101 /* cursor */
Jani Nikula143c3352019-01-18 14:01:24 +02001102 u16 cur_latency[5];
Pradeep Bhat2af30a52014-11-04 17:06:38 +00001103 /*
1104 * Raw watermark memory latency values
1105 * for SKL for all 8 levels
1106 * in 1us units.
1107 */
Jani Nikula143c3352019-01-18 14:01:24 +02001108 u16 skl_latency[8];
Ville Syrjälä609cede2013-10-09 19:18:03 +03001109
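		/*
		 * Worked example (illustrative sketch, not from the original
		 * header): pri_latency[0] == 6 encodes 0.6 us for WM0 and
		 * pri_latency[1] == 4 encodes 2.0 us for WM1, whereas
		 * skl_latency[1] == 4 simply means 4 us.
		 */
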
1110 /* current hardware state */
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00001111 union {
1112 struct ilk_wm_values hw;
Ville Syrjälä0018fda2015-03-05 21:19:45 +02001113 struct vlv_wm_values vlv;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03001114 struct g4x_wm_values g4x;
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00001115 };
Ville Syrjälä58590c12015-09-08 21:05:12 +03001116
Jani Nikula143c3352019-01-18 14:01:24 +02001117 u8 max_level;
Matt Ropered4a6a72016-02-23 17:20:13 -08001118
1119 /*
1120 * Should be held around atomic WM register writing; also
1121	 * protects intel_crtc->wm.active and
Maarten Lankhorstec193642019-06-28 10:55:17 +02001122 * crtc_state->wm.need_postvbl_update.
Matt Ropered4a6a72016-02-23 17:20:13 -08001123 */
1124 struct mutex wm_mutex;
Matt Roper279e99d2016-05-12 07:06:02 -07001125
1126 /*
1127 * Set during HW readout of watermarks/DDB. Some platforms
1128 * need to know when we're still using BIOS-provided values
1129 * (which we don't fully trust).
1130 */
1131 bool distrust_bios_wm;
Ville Syrjälä53615a52013-08-01 16:18:50 +03001132 } wm;
1133
Stanislav Lisovskiy0f0f9ae2020-02-03 01:06:29 +02001134	u8 enabled_dbuf_slices_mask; /* GEN11 has 2 configurable slices */
Stanislav Lisovskiy072fcc32020-02-03 01:06:25 +02001135
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301136 struct dram_info {
1137 bool valid;
Mahesh Kumar86b59282018-08-31 16:39:42 +05301138 bool is_16gb_dimm;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301139 u8 num_channels;
Ville Syrjälä80373fb2019-03-06 22:35:40 +02001140 u8 ranks;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301141 u32 bandwidth_kbps;
Mahesh Kumar8a6c5442018-08-24 15:02:25 +05301142 bool symmetric_memory;
Ville Syrjäläb185a352019-03-06 22:35:51 +02001143 enum intel_dram_type {
1144 INTEL_DRAM_UNKNOWN,
1145 INTEL_DRAM_DDR3,
1146 INTEL_DRAM_DDR4,
1147 INTEL_DRAM_LPDDR3,
1148 INTEL_DRAM_LPDDR4
1149 } type;
Mahesh Kumarcbfa59d2018-08-24 15:02:21 +05301150 } dram_info;
1151
Ville Syrjäläc457d9c2019-05-24 18:36:14 +03001152 struct intel_bw_info {
Stanislav Lisovskiy9b93daa92019-11-25 18:08:00 +02001153 /* for each QGV point */
1154 unsigned int deratedbw[I915_NUM_QGV_POINTS];
Ville Syrjälä56e93712019-06-06 15:42:10 +03001155 u8 num_qgv_points;
1156 u8 num_planes;
Ville Syrjäläc457d9c2019-05-24 18:36:14 +03001157 } max_bw[6];
1158
Ville Syrjäläfd1a9bb2020-01-20 19:47:25 +02001159 struct intel_global_obj bw_obj;
Ville Syrjäläc457d9c2019-05-24 18:36:14 +03001160
Daniele Ceraolo Spurio1bf676c2019-06-13 16:21:52 -07001161 struct intel_runtime_pm runtime_pm;
Paulo Zanoni8a187452013-12-06 20:32:13 -02001162
Chris Wilson8f8b1172019-10-07 22:09:41 +01001163 struct i915_perf perf;
Robert Braggeec688e2016-11-07 19:49:47 +00001164
Oscar Mateoa83014d2014-07-24 17:04:21 +01001165 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
Tvrtko Ursuline5be5c72019-06-21 08:07:40 +01001166 struct intel_gt gt;
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001167
1168 struct {
Chris Wilsona4e7ccd2019-10-04 14:40:09 +01001169 struct i915_gem_contexts {
1170 spinlock_t lock; /* locks list */
1171 struct list_head list;
1172
1173 struct llist_head free_list;
1174 struct work_struct free_work;
1175 } contexts;
Chris Wilsonf17b8982020-01-01 14:10:07 +00001176
1177 /*
1178	 * We replace the local file with a global mapping, since the
1179 * backing storage for the mmap is on the device and not
1180 * on the struct file, and we do not want to prolong the
1181 * lifetime of the local fd. To minimise the number of
1182 * anonymous inodes we create, we use a global singleton to
1183 * share the global mapping.
1184 */
1185 struct file *mmap_singleton;
Chris Wilson23c3c3d2019-04-24 21:07:14 +01001186 } gem;
Oscar Mateoa83014d2014-07-24 17:04:21 +01001187
Ville Syrjälädd5279c2019-10-22 21:56:43 +03001188 u8 pch_ssc_use;
1189
Ville Syrjälä7d423af2019-10-03 17:02:31 +03001190 /* For i915gm/i945gm vblank irq workaround */
1191 u8 vblank_enabled;
Ville Syrjäläd938da62019-03-22 20:08:03 +02001192
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001193 /* perform PHY state sanity checks? */
1194 bool chv_phy_assert[2];
1195
Mahesh Kumara3a89862016-12-01 21:19:34 +05301196 bool ipc_enabled;
1197
Pandiyan, Dhinakaranf9318942016-09-21 13:02:48 -07001198 /* Used to save the pipe-to-encoder mapping for audio */
1199 struct intel_encoder *av_enc_map[I915_MAX_PIPES];
Takashi Iwai0bdf5a02015-11-30 18:19:39 +01001200
Jerome Anandeef57322017-01-25 04:27:49 +05301201	/* resources shared with the HDMI LPE audio driver */
1202 struct {
1203 struct platform_device *platdev;
1204 int irq;
1205 } lpe_audio;
1206
Tvrtko Ursulinb46a33e2017-11-21 18:18:45 +00001207 struct i915_pmu pmu;
1208
Ramalingam C9055aac2019-02-16 23:06:51 +05301209 struct i915_hdcp_comp_master *hdcp_master;
1210 bool hdcp_comp_added;
1211
1212 /* Mutex to protect the above hdcp component related values. */
1213 struct mutex hdcp_comp_mutex;
1214
Chris Wilsonf05816c2019-11-01 10:15:28 +00001215 I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
1216
Daniel Vetterbdf1e7e2014-05-21 17:37:52 +02001217 /*
1218 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1219 * will be rejected. Instead look for a better place.
1220 */
Jani Nikula77fec552014-03-31 14:27:22 +03001221};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Chris Wilson2c1792a2013-08-01 18:39:55 +01001223static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1224{
Chris Wilson091387c2016-06-24 14:00:21 +01001225 return container_of(dev, struct drm_i915_private, drm);
Chris Wilson2c1792a2013-08-01 18:39:55 +01001226}
1227
David Weinehallc49d13e2016-08-22 13:32:42 +03001228static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
Imre Deak888d0d42015-01-08 17:54:13 +02001229{
Chris Wilson361f9dc2019-08-06 08:42:19 +01001230 return dev_get_drvdata(kdev);
1231}
1232
1233static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
1234{
1235 return pci_get_drvdata(pdev);
Imre Deak888d0d42015-01-08 17:54:13 +02001236}
1237
Dave Gordonb4ac5af2016-03-24 11:20:38 +00001238/* Simple iterator over all initialised engines */
Akash Goel3b3f1652016-10-13 22:44:48 +05301239#define for_each_engine(engine__, dev_priv__, id__) \
1240 for ((id__) = 0; \
1241 (id__) < I915_NUM_ENGINES; \
1242 (id__)++) \
1243 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
Dave Gordonc3232b12016-03-23 18:19:53 +00001244
1245/* Iterator over subset of engines selected by mask */
Tvrtko Ursulina50134b2019-10-17 17:18:52 +01001246#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
1247 for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
Tvrtko Ursulin19d3cf02018-04-06 12:44:07 +01001248 (tmp__) ? \
Tvrtko Ursulina50134b2019-10-17 17:18:52 +01001249 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
Tvrtko Ursulin19d3cf02018-04-06 12:44:07 +01001250 0;)
Mika Kuoppalaee4b6fa2016-03-16 17:54:00 +02001251
Chris Wilson750e76b2019-08-06 13:43:00 +01001252#define rb_to_uabi_engine(rb) \
1253 rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
1254
1255#define for_each_uabi_engine(engine__, i915__) \
1256 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
1257 (engine__); \
1258 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
1259
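/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_uabi_engine(engine, i915)
 *		pr_info("found engine %s\n", engine->name);
 *
 * for_each_engine() walks the same engines but in intel_engine_id order
 * via the (dev_priv)->engine[] array.
 */
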
Daniel Vetter190d6cd2013-07-04 13:06:28 +02001260#define I915_GTT_OFFSET_NONE ((u32)-1)
Chris Wilsoned2f3452012-11-15 11:32:19 +00001261
Daniel Vettera071fa02014-06-18 23:28:09 +02001262/*
1263 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05301264 * considered, interface-wise, to be the frontbuffer for the given plane. This
Daniel Vettera071fa02014-06-18 23:28:09 +02001265 * doesn't mean that the hw necessarily already scans it out, but that any
1266 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
1267 *
1268 * We have one bit per pipe and per scanout plane type.
1269 */
Sagar Arun Kambled1b9d032015-09-14 21:35:42 +05301270#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
Ville Syrjäläaa81e2c2018-01-24 20:36:42 +02001271#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
1272 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
1273 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
1274 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
1275})
Daniel Vettera071fa02014-06-18 23:28:09 +02001276#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
Ville Syrjäläaa81e2c2018-01-24 20:36:42 +02001277 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
Daniel Vettercc365132014-06-18 13:59:13 +02001278#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
Ville Syrjäläaa81e2c2018-01-24 20:36:42 +02001279 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
1280 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
Daniel Vettera071fa02014-06-18 23:28:09 +02001281
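/*
 * Worked example (illustrative sketch; assumes PIPE_A == 0, PIPE_B == 1
 * and PLANE_PRIMARY == 0, as in the enum pipe / enum plane_id definitions):
 *
 *	INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) == BIT(0 + 8 * 1) == BIT(8)
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_A)        == BIT(7)
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_A)       == GENMASK(7, 0)
 */
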
Jani Nikula2cc83762018-12-31 16:56:46 +02001282#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
Jani Nikula02584042018-12-31 16:56:41 +02001283#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
Chris Wilson481827b2018-07-06 11:14:41 +01001284#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001285
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001286#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
Jani Nikula02584042018-12-31 16:56:41 +02001287#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
Zou Nan haicae58522010-11-09 17:17:32 +08001288
Jani Nikulae87a0052015-10-20 15:22:02 +03001289#define REVID_FOREVER 0xff
Tvrtko Ursulin4805fe82016-11-04 14:42:46 +00001290#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001291
Joonas Lahtinenfe52e592017-09-13 14:52:54 +03001292#define INTEL_GEN_MASK(s, e) ( \
1293 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
1294 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
Rodrigo Vivi5bc0e892018-10-26 12:51:43 -07001295 GENMASK((e) - 1, (s) - 1))
Joonas Lahtinenfe52e592017-09-13 14:52:54 +03001296
Rodrigo Vivi5bc0e892018-10-26 12:51:43 -07001297/* Returns true if Gen is in inclusive range [Start, End] */
Lucas De Marchi00690002018-12-12 10:10:42 -08001298#define IS_GEN_RANGE(dev_priv, s, e) \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001299 (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
Tvrtko Ursulinac657f62016-05-10 10:57:08 +01001300
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001301#define IS_GEN(dev_priv, n) \
1302 (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001303 INTEL_INFO(dev_priv)->gen == (n))
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001304
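/*
 * Worked example (illustrative sketch): INTEL_GEN_MASK(9, 11) expands to
 * GENMASK(10, 8), so IS_GEN_RANGE(dev_priv, 9, 11) matches gen9, gen10
 * and gen11 parts, while IS_GEN(dev_priv, 9) matches gen9 only and
 * insists on a compile-time constant argument.
 */
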
Animesh Manna18febcb2019-09-20 17:29:21 +05301305#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
1306
Jani Nikulae87a0052015-10-20 15:22:02 +03001307/*
1308 * Return true if revision is in range [since,until] inclusive.
1309 *
1310 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
1311 */
1312#define IS_REVID(p, since, until) \
1313 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
1314
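/*
 * Worked example (illustrative sketch): on a part with INTEL_REVID() ==
 * 0x2, IS_REVID(p, 0, REVID_FOREVER) is always true, IS_REVID(p, 0x1, 0x2)
 * is true, and IS_REVID(p, 0x3, REVID_FOREVER) is false; the
 * platform-specific *_REVID_* values used with these checks are defined
 * further down in this header.
 */
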
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001315static __always_inline unsigned int
1316__platform_mask_index(const struct intel_runtime_info *info,
1317 enum intel_platform p)
1318{
1319 const unsigned int pbits =
1320 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1321
1322 /* Expand the platform_mask array if this fails. */
1323 BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
1324 pbits * ARRAY_SIZE(info->platform_mask));
1325
1326 return p / pbits;
1327}
1328
1329static __always_inline unsigned int
1330__platform_mask_bit(const struct intel_runtime_info *info,
1331 enum intel_platform p)
1332{
1333 const unsigned int pbits =
1334 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1335
1336 return p % pbits + INTEL_SUBPLATFORM_BITS;
1337}
1338
1339static inline u32
1340intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
1341{
1342 const unsigned int pi = __platform_mask_index(info, p);
1343
1344	/* Mask the low INTEL_SUBPLATFORM_BITS bits, not the literal count. */
1344	return info->platform_mask[pi] & (BIT(INTEL_SUBPLATFORM_BITS) - 1);
1345}
1346
1347static __always_inline bool
1348IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
1349{
1350 const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1351 const unsigned int pi = __platform_mask_index(info, p);
1352 const unsigned int pb = __platform_mask_bit(info, p);
1353
1354 BUILD_BUG_ON(!__builtin_constant_p(p));
1355
1356 return info->platform_mask[pi] & BIT(pb);
1357}
1358
1359static __always_inline bool
1360IS_SUBPLATFORM(const struct drm_i915_private *i915,
1361 enum intel_platform p, unsigned int s)
1362{
1363 const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1364 const unsigned int pi = __platform_mask_index(info, p);
1365 const unsigned int pb = __platform_mask_bit(info, p);
1366 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
1367 const u32 mask = info->platform_mask[pi];
1368
1369 BUILD_BUG_ON(!__builtin_constant_p(p));
1370 BUILD_BUG_ON(!__builtin_constant_p(s));
1371 BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
1372
1373 /* Shift and test on the MSB position so sign flag can be used. */
1374 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
1375}
Tvrtko Ursulin5a127a82017-09-20 10:26:59 +01001376
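/*
 * Worked example (illustrative sketch; assumes 32-bit platform_mask words
 * and INTEL_SUBPLATFORM_BITS == 3, i.e. pbits == 29): platform number 30
 * lands in word 30 / 29 == 1 at bit 30 % 29 + 3 == 4, keeping bits 2:0 of
 * every word free for the per-platform subplatform bits that
 * IS_SUBPLATFORM() tests.
 */
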
Tvrtko Ursuline08891a2019-03-26 07:40:55 +00001377#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
José Roberto de Souzadc90fe32019-10-24 12:51:19 -07001378#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
Tvrtko Ursuline08891a2019-03-26 07:40:55 +00001379
Tvrtko Ursulin5a127a82017-09-20 10:26:59 +01001380#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
1381#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
1382#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
1383#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
1384#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
1385#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
1386#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
1387#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
1388#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
1389#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
1390#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
1391#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
Jani Nikulaf69c11a2016-11-30 17:43:05 +02001392#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
Tvrtko Ursulin5a127a82017-09-20 10:26:59 +01001393#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
1394#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
Tvrtko Ursuline08891a2019-03-26 07:40:55 +00001395#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1396#define IS_IRONLAKE_M(dev_priv) \
1397 (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
Tvrtko Ursulin5a127a82017-09-20 10:26:59 +01001398#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
Lionel Landwerlin18b53812017-08-30 17:12:07 +01001399#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001400 INTEL_INFO(dev_priv)->gt == 1)
Tvrtko Ursulin5a127a82017-09-20 10:26:59 +01001401#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
1402#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
1403#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
1404#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
1405#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
1406#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
1407#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
1408#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
1409#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
1410#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
Rodrigo Vivi412310012018-01-11 16:00:04 -02001411#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
Bob Paauwe897f2962019-03-22 10:58:43 -07001412#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
Daniele Ceraolo Spurioabd3a0f2019-07-11 10:30:56 -07001413#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001414#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
1415 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001416#define IS_BDW_ULT(dev_priv) \
1417 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
1418#define IS_BDW_ULX(dev_priv) \
1419 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001420#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001421 INTEL_INFO(dev_priv)->gt == 3)
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001422#define IS_HSW_ULT(dev_priv) \
1423 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001424#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001425 INTEL_INFO(dev_priv)->gt == 3)
Chris Wilson167bc752018-12-28 14:07:34 +00001426#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001427 INTEL_INFO(dev_priv)->gt == 1)
Paulo Zanoni9bbfd202014-04-29 11:00:22 -03001428/* ULX machines are also considered ULT. */
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001429#define IS_HSW_ULX(dev_priv) \
1430 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
1431#define IS_SKL_ULT(dev_priv) \
1432 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
1433#define IS_SKL_ULX(dev_priv) \
1434 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
1435#define IS_KBL_ULT(dev_priv) \
1436 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
1437#define IS_KBL_ULX(dev_priv) \
1438 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
Robert Bragg19f81df2017-06-13 12:23:03 +01001439#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001440 INTEL_INFO(dev_priv)->gt == 2)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001441#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001442 INTEL_INFO(dev_priv)->gt == 3)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001443#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001444 INTEL_INFO(dev_priv)->gt == 4)
Lionel Landwerlin38915892017-06-13 12:23:07 +01001445#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001446 INTEL_INFO(dev_priv)->gt == 2)
Lionel Landwerlin38915892017-06-13 12:23:07 +01001447#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001448 INTEL_INFO(dev_priv)->gt == 3)
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001449#define IS_CFL_ULT(dev_priv) \
1450 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
Ville Syrjälä6ce1c332019-06-05 19:29:46 +03001451#define IS_CFL_ULX(dev_priv) \
1452 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
Lionel Landwerlin22ea4f32017-09-18 12:21:24 +01001453#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001454 INTEL_INFO(dev_priv)->gt == 2)
Lionel Landwerlin4407eaa2017-11-10 19:08:40 +00001455#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001456 INTEL_INFO(dev_priv)->gt == 3)
Tvrtko Ursulin805446c2019-03-27 14:23:28 +00001457#define IS_CNL_WITH_PORT_F(dev_priv) \
1458 IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
1459#define IS_ICL_WITH_PORT_F(dev_priv) \
1460 IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
Sagar Arun Kamble7a58bad2015-09-12 10:17:50 +05301461
Jani Nikulaef712bb2015-10-20 15:22:00 +03001462#define SKL_REVID_A0 0x0
1463#define SKL_REVID_B0 0x1
1464#define SKL_REVID_C0 0x2
1465#define SKL_REVID_D0 0x3
1466#define SKL_REVID_E0 0x4
1467#define SKL_REVID_F0 0x5
Mika Kuoppala4ba9c1f2016-07-20 14:26:12 +03001468#define SKL_REVID_G0 0x6
1469#define SKL_REVID_H0 0x7
Hoath, Nicholase90a21d2015-02-05 10:47:17 +00001470
Jani Nikulae87a0052015-10-20 15:22:02 +03001471#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
1472
Jani Nikulaef712bb2015-10-20 15:22:00 +03001473#define BXT_REVID_A0 0x0
Jani Nikulafffda3f2015-10-20 15:22:01 +03001474#define BXT_REVID_A1 0x1
Jani Nikulaef712bb2015-10-20 15:22:00 +03001475#define BXT_REVID_B0 0x3
Ander Conselvan de Oliveiraa3f79ca2016-11-24 15:23:27 +02001476#define BXT_REVID_B_LAST 0x8
Jani Nikulaef712bb2015-10-20 15:22:00 +03001477#define BXT_REVID_C0 0x9
Nick Hoath6c74c872015-03-20 09:03:52 +00001478
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +01001479#define IS_BXT_REVID(dev_priv, since, until) \
1480 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
Jani Nikulae87a0052015-10-20 15:22:02 +03001481
Mika Kuoppalac033a372016-06-07 17:18:55 +03001482#define KBL_REVID_A0 0x0
1483#define KBL_REVID_B0 0x1
Mika Kuoppalafe905812016-06-07 17:19:03 +03001484#define KBL_REVID_C0 0x2
1485#define KBL_REVID_D0 0x3
1486#define KBL_REVID_E0 0x4
Mika Kuoppalac033a372016-06-07 17:18:55 +03001487
Tvrtko Ursulin08537232016-10-13 11:03:02 +01001488#define IS_KBL_REVID(dev_priv, since, until) \
1489 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
Mika Kuoppalac033a372016-06-07 17:18:55 +03001490
Ander Conselvan de Oliveiraf4f4b592017-02-22 08:34:29 +02001491#define GLK_REVID_A0 0x0
1492#define GLK_REVID_A1 0x1
Ville Syrjälä834c6bb2020-01-28 17:51:52 +02001493#define GLK_REVID_A2 0x2
1494#define GLK_REVID_B0 0x3
Ander Conselvan de Oliveiraf4f4b592017-02-22 08:34:29 +02001495
1496#define IS_GLK_REVID(dev_priv, since, until) \
1497 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
1498
Paulo Zanoni3c2e0fd2017-06-06 13:30:34 -07001499#define CNL_REVID_A0 0x0
1500#define CNL_REVID_B0 0x1
Rodrigo Vivie4ffc832017-08-22 16:58:28 -07001501#define CNL_REVID_C0 0x2
Paulo Zanoni3c2e0fd2017-06-06 13:30:34 -07001502
1503#define IS_CNL_REVID(p, since, until) \
1504 (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
1505
Oscar Mateocc38cae2018-05-08 14:29:23 -07001506#define ICL_REVID_A0 0x0
1507#define ICL_REVID_A2 0x1
1508#define ICL_REVID_B0 0x3
1509#define ICL_REVID_B2 0x4
1510#define ICL_REVID_C0 0x5
1511
1512#define IS_ICL_REVID(p, since, until) \
1513 (IS_ICELAKE(p) && IS_REVID(p, since, until))
1514
Swathi Dhanavanthri61b088c2020-05-12 11:00:50 -07001515#define EHL_REVID_A0 0x0
1516
1517#define IS_EHL_REVID(p, since, until) \
1518 (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
1519
Mika Kuoppala613716b2019-10-15 18:44:39 +03001520#define TGL_REVID_A0 0x0
Matt Roperdbff5a82020-04-14 14:11:17 -07001521#define TGL_REVID_B0 0x1
1522#define TGL_REVID_C0 0x2
Mika Kuoppala613716b2019-10-15 18:44:39 +03001523
1524#define IS_TGL_REVID(p, since, until) \
1525 (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
1526
Rodrigo Vivi8727dc02016-12-18 13:36:26 -08001527#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001528#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
1529#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
Ander Conselvan de Oliveira3e4274f2016-11-10 17:23:09 +02001530
Chris Wilson8a68d462019-03-05 18:03:30 +00001531#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
Tvrtko Ursulina19d6ff2016-06-23 14:52:41 +01001532
Daniele Ceraolo Spurio97ee6e92019-03-21 17:24:31 -07001533#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
1534 unsigned int first__ = (first); \
1535 unsigned int count__ = (count); \
1536 (INTEL_INFO(dev_priv)->engine_mask & \
Chris Wilson9511cb62019-03-26 18:00:07 +00001537 GENMASK(first__ + count__ - 1, first__)) >> first__; \
Daniele Ceraolo Spurio97ee6e92019-03-21 17:24:31 -07001538})
1539#define VDBOX_MASK(dev_priv) \
1540 ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
1541#define VEBOX_MASK(dev_priv) \
1542 ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
1543
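/*
 * Worked example (illustrative sketch): with an engine_mask in which only
 * VCS0 and VCS2 are present, VDBOX_MASK(dev_priv) evaluates to 0b101; the
 * final shift keeps instance numbering relative to the first engine of
 * the class.
 */
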
Jon Bloomfield4f7af192018-05-22 13:59:06 -07001544/*
1545 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
1546 * All later gens can run the final buffer from the ppgtt.
1547 */
1548#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
1549
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001550#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
1551#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
Daniele Ceraolo Spuriof6ac9932019-03-28 10:45:32 -07001552#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
Jon Bloomfield44157642018-06-08 08:53:46 -07001553#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
Tvrtko Ursulin86527442016-10-13 11:03:00 +01001554#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
1555 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
Zou Nan haicae58522010-11-09 17:17:32 +08001556
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001557#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
Daniel Vetter1d2a3142012-02-09 17:15:46 +01001558
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001559#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001560 (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
Thomas Daniel05f0add2018-03-02 18:14:59 +02001561#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001562 (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
Michał Winiarskia4598d12017-10-25 22:00:18 +02001563#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001564 (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
Chris Wilsonfb5c5512017-11-20 20:55:00 +00001565
1566#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
1567
Chris Wilsoncbecbcc2019-03-14 22:38:36 +00001568#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
Chris Wilson4bdafb92018-09-26 21:12:22 +01001569#define HAS_PPGTT(dev_priv) \
1570 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
1571#define HAS_FULL_PPGTT(dev_priv) \
1572 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
Chris Wilson4bdafb92018-09-26 21:12:22 +01001573
Matthew Aulda5c081662017-10-06 23:18:18 +01001574#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
1575 GEM_BUG_ON((sizes) == 0); \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001576 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
Matthew Aulda5c081662017-10-06 23:18:18 +01001577})
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001578
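/*
 * Usage sketch (illustrative): HAS_PAGE_SIZES(dev_priv, SZ_64K | SZ_2M)
 * is true only if *both* page sizes are supported, because any requested
 * bit missing from INTEL_INFO(dev_priv)->page_sizes makes the expression
 * false.
 */
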
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001579#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
Tvrtko Ursulin0031fb92016-11-04 14:42:44 +00001580#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001581 (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
Zou Nan haicae58522010-11-09 17:17:32 +08001582
Daniel Vetterb45305f2012-12-17 16:21:27 +01001583/* Early gen2 have a totally busted CS tlb and require pinned batches. */
Jani Nikula2a307c22016-11-30 17:43:04 +02001584#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
Mika Kuoppala06e668a2015-12-16 19:18:37 +02001585
Imre Deak2248a282019-10-17 16:38:31 +03001586#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
1587 (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
1588
Rodrigo Vivid66047e42018-02-22 12:05:35 -08001589/* WaRsDisableCoarsePowerGating:skl,cnl */
Chris Wilson32f408a2019-12-31 12:27:08 +00001590#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
1591 (IS_CANNONLAKE(dev_priv) || \
1592 IS_SKL_GT3(dev_priv) || \
1593 IS_SKL_GT4(dev_priv))
Mika Kuoppala185c66e2016-04-05 15:56:16 +03001594
Ville Syrjälä309bd8e2017-08-18 21:37:05 +03001595#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
Ramalingam Cd5dc0f42018-06-28 19:04:49 +05301596#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
1597 IS_GEMINILAKE(dev_priv) || \
1598 IS_KABYLAKE(dev_priv))
Daniel Vetterb45305f2012-12-17 16:21:27 +01001599
Zou Nan haicae58522010-11-09 17:17:32 +08001600/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1601 * rows, which changed the alignment requirements and fence programming.
1602 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001603#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001604 !(IS_I915G(dev_priv) || \
1605 IS_I915GM(dev_priv)))
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001606#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
1607#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
Zou Nan haicae58522010-11-09 17:17:32 +08001608
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +00001609#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001610#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08001611#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
Zou Nan haicae58522010-11-09 17:17:32 +08001612
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001613#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
Damien Lespiauf5adf942013-06-24 18:29:34 +01001614
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001615#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
Jani Nikula0c9b3712015-05-18 17:10:01 +03001616
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001617#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
1618#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
1619#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
Ville Syrjälä10cf8e72020-03-18 19:02:35 +02001620#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
Chris Wilsonfb6db0f2017-12-01 11:30:30 +00001621
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001622#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
1623#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
Chris Wilsonfb6db0f2017-12-01 11:30:30 +00001624#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001625
Chris Wilson91cbdb82019-04-19 14:48:36 +01001626#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
1627
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001628#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
Daniel Vettereb805622015-05-04 14:58:44 +02001629
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001630#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1631#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
Joonas Lahtinendfc51482016-11-03 10:39:46 +02001632
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001633#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
Mahesh Kumare57f1c022017-08-17 19:15:27 +05301634
Abdiel Janulgue3aae9d02019-10-18 10:07:49 +01001635#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
Matthew Auldb908be52019-10-25 16:37:22 +01001636#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
Abdiel Janulgue3aae9d02019-10-18 10:07:49 +01001637
Daniele Ceraolo Spurio702668e2019-07-24 17:18:06 -07001638#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
Michal Wajdeczko2fe2d4e2017-12-06 13:53:10 +00001639
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001640#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
arun.siluvery@linux.intel.com33e141e2016-06-03 06:34:33 +01001641
Michel Thierrya7a7a0e2019-07-30 11:04:06 -07001642#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
1643
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08001645#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
Sonika Jindal5fafe292014-07-21 15:23:38 +05301646
Rodrigo Viviff159472017-06-09 15:26:14 -07001647#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
Shashank Sharma6389dd82016-10-14 19:56:50 +05301648
Ben Widawsky040d2ba2013-09-19 11:01:40 -07001649/* DPF == dynamic parity feature */
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001650#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01001651#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
1652 2 : HAS_L3_DPF(dev_priv))
Ben Widawskye1ef7cc2012-07-24 20:47:31 -07001653
Ben Widawskyc8735b02012-09-07 19:43:39 -07001654#define GT_FREQUENCY_MULTIPLIER 50
Akash Goelde43ae92015-03-06 11:07:14 +05301655#define GEN9_FREQ_SCALER 3
Ben Widawskyc8735b02012-09-07 19:43:39 -07001656
Jani Nikula8d8b00312019-09-11 23:29:08 +03001657#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
Jani Nikula24977872019-09-11 12:26:08 +03001658
Jani Nikula8d8b00312019-09-11 23:29:08 +03001659#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
José Roberto de Souzae1bf0942018-11-30 15:20:47 -08001660
Jani Nikulaa2b69ea2019-09-13 13:04:07 +03001661/* Only valid when HAS_DISPLAY() is true */
1662#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
1663
Chris Wilson80debff2017-05-25 13:16:12 +01001664static inline bool intel_vtd_active(void)
Chris Wilson48f112f2016-06-24 14:07:14 +01001665{
1666#ifdef CONFIG_INTEL_IOMMU
Chris Wilson80debff2017-05-25 13:16:12 +01001667 if (intel_iommu_gfx_mapped)
Chris Wilson48f112f2016-06-24 14:07:14 +01001668 return true;
1669#endif
1670 return false;
1671}
1672
Chris Wilson80debff2017-05-25 13:16:12 +01001673static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
1674{
1675 return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
1676}
1677
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07001678static inline bool
1679intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
1680{
Chris Wilson80debff2017-05-25 13:16:12 +01001681 return IS_BROXTON(dev_priv) && intel_vtd_active();
Jon Bloomfield0ef34ad2017-05-24 08:54:11 -07001682}
1683
Chris Wilson0673ad42016-06-24 14:00:22 +01001684/* i915_drv.c */
Jani Nikulaefab0692016-09-15 16:28:54 +03001685extern const struct dev_pm_ops i915_pm_ops;
1686
Janusz Krzysztofikb01558e2019-07-12 13:24:26 +02001687int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
Chris Wilson361f9dc2019-08-06 08:42:19 +01001688void i915_driver_remove(struct drm_i915_private *i915);
Chris Wilson535275d2017-07-21 13:32:37 +01001689
Jani Nikula63bf8302019-10-04 15:20:18 +03001690int i915_resume_switcheroo(struct drm_i915_private *i915);
1691int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
1692
Chris Wilson26f00512019-08-07 15:20:41 +01001693int i915_getparam_ioctl(struct drm_device *dev, void *data,
1694 struct drm_file *file_priv);
1695
Eric Anholt673a3942008-07-30 12:06:12 -07001696/* i915_gem.c */
Chris Wilson8a2421b2017-06-16 15:05:22 +01001697int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
1698void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
Matthew Aulda3f356b2019-09-27 18:33:49 +01001699void i915_gem_init_early(struct drm_i915_private *dev_priv);
Michal Wajdeczkoa0de9082018-03-23 12:34:49 +00001700void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
Chris Wilson6a800ea2016-09-21 14:51:07 +01001701int i915_gem_freeze(struct drm_i915_private *dev_priv);
Chris Wilson461fb992016-05-14 07:26:33 +01001702int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
1703
Matthew Auldda1184c2019-10-18 10:07:50 +01001704struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
1705
Chris Wilsonbdeb9782016-12-23 14:57:56 +00001706static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
1707{
Chris Wilsonc03467b2019-07-03 10:17:17 +01001708 /*
1709 * A single pass should suffice to release all the freed objects (along
Chris Wilsonbdeb9782016-12-23 14:57:56 +00001710	 * most call paths), but be a little more paranoid in that freeing
1711	 * the objects does take a small amount of time, during which the rcu
1712 * callbacks could have added new objects into the freed list, and
1713 * armed the work again.
1714 */
Chris Wilsonc03467b2019-07-03 10:17:17 +01001715 while (atomic_read(&i915->mm.free_count)) {
1716 flush_work(&i915->mm.free_work);
Chris Wilsonbdeb9782016-12-23 14:57:56 +00001717 rcu_barrier();
Chris Wilsonc03467b2019-07-03 10:17:17 +01001718 }
Chris Wilsonbdeb9782016-12-23 14:57:56 +00001719}
1720
Chris Wilson3b19f162017-07-18 14:41:24 +01001721static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
1722{
1723 /*
1724	 * Similar to the objects above (see i915_gem_drain_freed_objects()); in
1725	 * general we have workers that are armed by RCU and then rearm
1726	 * themselves in their callbacks. To be paranoid, we need to
1727	 * drain the workqueue a second time after waiting for the RCU
1728	 * grace period so that we catch work queued via RCU from the first
1729	 * pass. As neither drain_workqueue() nor flush_workqueue() report
1730	 * a result, we assume that no more
Chris Wilsondc76e572019-05-01 14:57:51 +01001731	 * than 3 passes are required to catch all _recursive_ RCU delayed work.
1733	 */
Chris Wilsondc76e572019-05-01 14:57:51 +01001734 int pass = 3;
Chris Wilson3b19f162017-07-18 14:41:24 +01001735 do {
Chris Wilson4fda44b2019-07-03 18:19:13 +01001736 flush_workqueue(i915->wq);
Chris Wilson3b19f162017-07-18 14:41:24 +01001737 rcu_barrier();
Janusz Krzysztofik141f3762019-04-06 11:40:34 +01001738 i915_gem_drain_freed_objects(i915);
Chris Wilson3b19f162017-07-18 14:41:24 +01001739 } while (--pass);
Chris Wilsondc76e572019-05-01 14:57:51 +01001740 drain_workqueue(i915->wq);
Chris Wilson3b19f162017-07-18 14:41:24 +01001741}
1742
Chris Wilson058d88c2016-08-15 10:49:06 +01001743struct i915_vma * __must_check
Joonas Lahtinenec7adb62015-03-16 14:11:13 +02001744i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1745 const struct i915_ggtt_view *view,
Chris Wilson91b2db62016-08-04 16:32:23 +01001746 u64 size,
Chris Wilson2ffffd02016-08-04 16:32:22 +01001747 u64 alignment,
1748 u64 flags);
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00001749
Chris Wilsonc03467b2019-07-03 10:17:17 +01001750int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
1751 unsigned long flags);
1752#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
Chris Wilson16c46fd2019-12-08 16:12:51 +00001753#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
Chris Wilson9da0ea02020-04-01 23:39:24 +01001754#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001755
Chris Wilson7c108fd2016-10-24 13:42:18 +01001756void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
1757
Dave Airlieff72145b2011-02-07 12:16:14 +10001758int i915_gem_dumb_create(struct drm_file *file_priv,
1759 struct drm_device *dev,
1760 struct drm_mode_create_dumb *args);
Dave Gordon85d12252016-05-20 11:54:06 +01001761
Chris Wilson73cb9702016-10-28 13:58:46 +01001762int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
Chris Wilson1690e1e2011-12-14 13:57:08 +01001763
Mika Kuoppala2ac0f452013-11-12 14:44:19 +02001764static inline u32 i915_reset_count(struct i915_gpu_error *error)
1765{
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001766 return atomic_read(&error->reset_count);
Daniel Vetter1f83fee2012-11-15 17:17:22 +01001767}
Chris Wilsona71d8d92012-02-15 11:25:36 +00001768
Michel Thierry702c8f82017-06-20 10:57:48 +01001769static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
Chris Wilson742379c2020-01-10 12:30:56 +00001770 const struct intel_engine_cs *engine)
Michel Thierry702c8f82017-06-20 10:57:48 +01001771{
Chris Wilsoncb823ed2019-07-12 20:29:53 +01001772 return atomic_read(&error->reset_engine_count[engine->uabi_class]);
Michel Thierry702c8f82017-06-20 10:57:48 +01001773}
1774
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001775int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
Chris Wilsonc29579d2019-08-06 13:42:59 +01001776void i915_gem_driver_register(struct drm_i915_private *i915);
1777void i915_gem_driver_unregister(struct drm_i915_private *i915);
Janusz Krzysztofik78dae1a2019-07-12 13:24:29 +02001778void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
Janusz Krzysztofik3b58a942019-07-12 13:24:28 +02001779void i915_gem_driver_release(struct drm_i915_private *dev_priv);
Chris Wilson5861b012019-03-08 09:36:54 +00001780void i915_gem_suspend(struct drm_i915_private *dev_priv);
Chris Wilsonec92ad02018-05-31 09:22:46 +01001781void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
Tvrtko Ursulinbf9e8422016-12-01 14:16:38 +00001782void i915_gem_resume(struct drm_i915_private *dev_priv);
Chris Wilson6b5e90f2016-11-14 20:41:05 +00001783
Chris Wilson829a0af2017-06-20 12:05:45 +01001784int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
Chris Wilson05394f32010-11-08 19:18:58 +00001785void i915_gem_release(struct drm_device *dev, struct drm_file *file);
Eric Anholt673a3942008-07-30 12:06:12 -07001786
Chris Wilsone4ffd172011-04-04 09:44:39 +01001787int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1788 enum i915_cache_level cache_level);
1789
Daniel Vetter1286ff72012-05-10 15:25:09 +02001790struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1791 struct dma_buf *dma_buf);
1792
Daniel Vettere4fa8452019-06-14 22:35:25 +02001793struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
Daniel Vetter1286ff72012-05-10 15:25:09 +02001794
Chris Wilsonca585b52016-05-24 14:53:36 +01001795static inline struct i915_gem_context *
Chris Wilson1acfc102017-06-20 12:05:47 +01001796__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
1797{
Tvrtko Ursulinc1007772019-12-24 09:59:20 +00001798 return xa_load(&file_priv->context_xa, id);
Chris Wilson1acfc102017-06-20 12:05:47 +01001799}
1800
1801static inline struct i915_gem_context *
Chris Wilsonca585b52016-05-24 14:53:36 +01001802i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1803{
1804 struct i915_gem_context *ctx;
1805
Chris Wilson1acfc102017-06-20 12:05:47 +01001806 rcu_read_lock();
1807 ctx = __i915_gem_context_lookup_rcu(file_priv, id);
1808 if (ctx && !kref_get_unless_zero(&ctx->ref))
1809 ctx = NULL;
1810 rcu_read_unlock();
Chris Wilsonca585b52016-05-24 14:53:36 +01001811
1812 return ctx;
1813}
1814
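/*
 * Usage sketch (illustrative; i915_gem_context_put() is assumed to be the
 * matching unreference, as declared in the gem context headers):
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 *	if (!ctx)
 *		return -ENOENT;
 *	...
 *	i915_gem_context_put(ctx);
 *
 * kref_get_unless_zero() is what makes the RCU-protected lookup safe
 * against a concurrent final unreference.
 */
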
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01001815/* i915_gem_evict.c */
Chris Wilsone522ac232016-08-04 16:32:18 +01001816int __must_check i915_gem_evict_something(struct i915_address_space *vm,
Chris Wilson2ffffd02016-08-04 16:32:22 +01001817 u64 min_size, u64 alignment,
Matthew Auld33dd8892019-09-09 13:40:52 +01001818 unsigned long color,
Chris Wilson2ffffd02016-08-04 16:32:22 +01001819 u64 start, u64 end,
Daniel Vetter1ec9e262014-02-14 14:01:11 +01001820 unsigned flags);
Chris Wilson625d9882017-01-11 11:23:11 +00001821int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
1822 struct drm_mm_node *node,
1823 unsigned int flags);
Chris Wilson2889caa2017-06-16 15:05:19 +01001824int i915_gem_evict_vm(struct i915_address_space *vm);
Chris Wilsonb47eb4a2010-08-07 11:01:23 +01001825
Chris Wilson920cf412016-10-28 13:58:30 +01001826/* i915_gem_internal.c */
1827struct drm_i915_gem_object *
1828i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
Chris Wilsonfcd46e52017-01-12 13:04:31 +00001829 phys_addr_t size);
Chris Wilson920cf412016-10-28 13:58:30 +01001830
Eric Anholt673a3942008-07-30 12:06:12 -07001831/* i915_gem_tiling.c */
Chris Wilson2c1792a2013-08-01 18:39:55 +01001832static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
Chris Wilsone9b73c62012-12-03 21:03:14 +00001833{
Chris Wilson972c6462019-10-16 15:32:34 +01001834 struct drm_i915_private *i915 = to_i915(obj->base.dev);
Chris Wilsone9b73c62012-12-03 21:03:14 +00001835
Chris Wilson972c6462019-10-16 15:32:34 +01001836 return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
Chris Wilson3e510a82016-08-05 10:14:23 +01001837 i915_gem_object_is_tiled(obj);
Chris Wilsone9b73c62012-12-03 21:03:14 +00001838}
1839
Chris Wilson91d4e0aa2017-01-09 16:16:13 +00001840u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
1841 unsigned int tiling, unsigned int stride);
1842u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
1843 unsigned int tiling, unsigned int stride);
1844
Chris Wilson0a4cd7c2014-08-22 14:41:39 +01001845const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
Ben Gamari20172632009-02-17 20:08:50 -05001846
Brad Volkin351e3db2014-02-18 10:15:46 -08001847/* i915_cmd_parser.c */
Chris Wilson1ca37122016-05-04 14:25:36 +01001848int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
Chris Wilson7756e452016-08-18 17:17:10 +01001849void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
Chris Wilson33a051a2016-07-27 09:07:26 +01001850void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
Chris Wilson05975cd2019-12-04 23:26:16 +00001851int intel_engine_cmd_parser(struct intel_engine_cs *engine,
Chris Wilson755bf8a2019-12-11 11:04:34 +00001852 struct i915_vma *batch,
1853 u32 batch_offset,
1854 u32 batch_length,
Chris Wilson32d94042019-12-11 23:08:56 +00001855 struct i915_vma *shadow,
1856 bool trampoline);
1857#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
Brad Volkin351e3db2014-02-18 10:15:46 -08001858
Chris Wilson94b4f3b2016-07-05 10:40:20 +01001859/* intel_device_info.c */
1860static inline struct intel_device_info *
1861mkwrite_device_info(struct drm_i915_private *dev_priv)
1862{
Jani Nikulaa0f04cc2018-12-31 16:56:44 +02001863 return (struct intel_device_info *)INTEL_INFO(dev_priv);
Chris Wilson94b4f3b2016-07-05 10:40:20 +01001864}
1865
Ben Widawskyc0c7bab2012-07-12 11:01:05 -07001866int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1867 struct drm_file *file);
Jesse Barnes575155a2012-03-28 13:39:37 -07001868
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001869#define __I915_REG_OP(op__, dev_priv__, ...) \
1870 intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
Keith Packard5f753772010-11-22 09:24:22 +00001871
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001872#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
1873#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
Keith Packard5f753772010-11-22 09:24:22 +00001874
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001875#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
Zou Nan haicae58522010-11-09 17:17:32 +08001876
Chris Wilsona6111f72015-04-07 16:21:02 +01001877/* These are untraced mmio-accessors that are only valid to be used inside
Arkadiusz Hileraafee2e2016-10-25 14:48:02 +02001878 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
Chris Wilsona6111f72015-04-07 16:21:02 +01001879 * controlled.
Arkadiusz Hileraafee2e2016-10-25 14:48:02 +02001880 *
Chris Wilsona6111f72015-04-07 16:21:02 +01001881 * Think twice, and think again, before using these.
Arkadiusz Hileraafee2e2016-10-25 14:48:02 +02001882 *
1883 * As an example, these accessors can possibly be used between:
1884 *
1885 * spin_lock_irq(&dev_priv->uncore.lock);
1886 * intel_uncore_forcewake_get__locked();
1887 *
1888 * and
1889 *
1890 * intel_uncore_forcewake_put__locked();
1891 * spin_unlock_irq(&dev_priv->uncore.lock);
1892 *
1894 * Note: some registers may not need forcewake held, so
1895 * intel_uncore_forcewake_{get,put} can be omitted, see
1896 * intel_uncore_forcewake_for_reg().
1897 *
1898 * Certain architectures will die if the same cacheline is concurrently accessed
1899 * by different clients (e.g. on Ivybridge). Access to registers should
1900 * therefore generally be serialised, by either the dev_priv->uncore.lock or
1901 * a more localised lock guarding all access to that bank of registers.
Chris Wilsona6111f72015-04-07 16:21:02 +01001902 */
Daniele Ceraolo Spurioa2b4abf2019-03-25 14:49:36 -07001903#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
1904#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
Chris Wilsona6111f72015-04-07 16:21:02 +01001905
Chris Wilsonc58305a2016-08-19 16:54:28 +01001906/* i915_mm.c */
1907int remap_io_mapping(struct vm_area_struct *vma,
1908 unsigned long addr, unsigned long pfn, unsigned long size,
1909 struct io_mapping *iomap);
Abdiel Janulgue4e598fa2020-01-03 20:41:35 +00001910int remap_io_sg(struct vm_area_struct *vma,
1911 unsigned long addr, unsigned long size,
1912 struct scatterlist *sgl, resource_size_t iobase);
Chris Wilsonc58305a2016-08-19 16:54:28 +01001913
Chris Wilson767a9832017-09-13 09:56:05 +01001914static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
1915{
1916 if (INTEL_GEN(i915) >= 10)
1917 return CNL_HWS_CSB_WRITE_INDEX;
1918 else
1919 return I915_HWS_CSB_WRITE_INDEX;
1920}
1921
Chris Wilson98932142019-05-28 10:29:44 +01001922static inline enum i915_map_type
1923i915_coherent_map_type(struct drm_i915_private *i915)
1924{
1925 return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1926}
1927
Ville Syrjälä802a58202020-03-02 16:39:42 +02001928static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
1929{
1930 return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
1931 1000000000);
1932}
1933
1934static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
1935{
1936 return div_u64(val * 1000000000,
1937 RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
1938}
1939
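/*
 * Worked example (illustrative sketch; assumes a hypothetical 12 MHz CS
 * timestamp frequency): i915_cs_timestamp_ns_to_ticks(i915, 1000) ==
 * DIV_ROUND_UP(1000 * 12000000, 1000000000) == 12 ticks, and
 * i915_cs_timestamp_ticks_to_ns(i915, 12) == 1000 ns again.
 */
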
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940#endif