Merge drm/drm-next into drm-intel-next-queued

Catch up with 5.2, specifically to remove a drm-tip merge
fixup around intel_workarounds.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91355c2e..5266dbe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,7 +35,7 @@
 # Extra header tests
 include $(src)/Makefile.header-test
 
-subdir-ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(srctree)/$(src)
 
 # Please keep these build lists sorted!
 
@@ -74,14 +74,23 @@
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_pm.o \
+	gt/intel_gt.o \
 	gt/intel_gt_pm.o \
 	gt/intel_hangcheck.o \
 	gt/intel_lrc.o \
+	gt/intel_renderstate.o \
 	gt/intel_reset.o \
 	gt/intel_ringbuffer.o \
 	gt/intel_mocs.o \
 	gt/intel_sseu.o \
+	gt/intel_timeline.o \
 	gt/intel_workarounds.o
+# autogenerated null render state
+gt-y += \
+	gt/gen6_renderstate.o \
+	gt/gen7_renderstate.o \
+	gt/gen8_renderstate.o \
+	gt/gen9_renderstate.o
 gt-$(CONFIG_DRM_I915_SELFTEST) += \
 	gt/mock_engine.o
 i915-y += $(gt-y)
@@ -121,12 +130,10 @@
 	  i915_gem_fence_reg.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
-	  i915_gem_render_state.o \
 	  i915_globals.o \
 	  i915_query.o \
 	  i915_request.o \
 	  i915_scheduler.o \
-	  i915_timeline.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_wopcm.o
@@ -143,12 +150,6 @@
 	  intel_huc.o \
 	  intel_huc_fw.o
 
-# autogenerated null render state
-i915-y += intel_renderstate_gen6.o \
-	  intel_renderstate_gen7.o \
-	  intel_renderstate_gen8.o \
-	  intel_renderstate_gen9.o
-
 # modesetting core code
 obj-y += display/
 i915-y += \
@@ -174,7 +175,8 @@
 	display/intel_overlay.o \
 	display/intel_psr.o \
 	display/intel_quirks.o \
-	display/intel_sprite.o
+	display/intel_sprite.o \
+	display/intel_tc.o
 i915-$(CONFIG_ACPI) += \
 	display/intel_acpi.o \
 	display/intel_opregion.o
@@ -211,6 +213,25 @@
 	display/vlv_dsi.o \
 	display/vlv_dsi_pll.o
 
+# perf code
+obj-y += oa/
+i915-y += \
+	oa/i915_oa_hsw.o \
+	oa/i915_oa_bdw.o \
+	oa/i915_oa_chv.o \
+	oa/i915_oa_sklgt2.o \
+	oa/i915_oa_sklgt3.o \
+	oa/i915_oa_sklgt4.o \
+	oa/i915_oa_bxt.o \
+	oa/i915_oa_kblgt2.o \
+	oa/i915_oa_kblgt3.o \
+	oa/i915_oa_glk.o \
+	oa/i915_oa_cflgt2.o \
+	oa/i915_oa_cflgt3.o \
+	oa/i915_oa_cnl.o \
+	oa/i915_oa_icl.o
+i915-y += i915_perf.o
+
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
@@ -225,23 +246,6 @@
 # virtual gpu code
 i915-y += i915_vgpu.o
 
-# perf code
-i915-y += i915_perf.o \
-	  i915_oa_hsw.o \
-	  i915_oa_bdw.o \
-	  i915_oa_chv.o \
-	  i915_oa_sklgt2.o \
-	  i915_oa_sklgt3.o \
-	  i915_oa_sklgt4.o \
-	  i915_oa_bxt.o \
-	  i915_oa_kblgt2.o \
-	  i915_oa_kblgt3.o \
-	  i915_oa_glk.o \
-	  i915_oa_cflgt2.o \
-	  i915_oa_cflgt3.o \
-	  i915_oa_cnl.o \
-	  i915_oa_icl.o
-
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
index e6ba66f..2fd6186 100644
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -6,18 +6,27 @@
 	i915_active_types.h \
 	i915_debugfs.h \
 	i915_drv.h \
+	i915_fixed.h \
+	i915_gem_gtt.h \
+	i915_globals.h \
 	i915_irq.h \
 	i915_params.h \
 	i915_priolist_types.h \
+	i915_pvinfo.h \
 	i915_reg.h \
 	i915_scheduler_types.h \
-	i915_timeline_types.h \
 	i915_utils.h \
+	i915_vgpu.h \
 	intel_csr.h \
 	intel_drv.h \
+	intel_guc_ct.h \
+	intel_guc_fwif.h \
+	intel_guc_reg.h \
+	intel_gvt.h \
 	intel_pm.h \
 	intel_runtime_pm.h \
 	intel_sideband.h \
+	intel_uc_fw.h \
 	intel_uncore.h \
 	intel_wakeref.h
 
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
index 1c75b5c..eec6961 100644
--- a/drivers/gpu/drm/i915/display/Makefile
+++ b/drivers/gpu/drm/i915/display/Makefile
@@ -1,2 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
 # Extra header tests
 include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 74448e6..3cf95c3 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -403,6 +403,19 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 		tmp &= ~FRC_LATENCY_OPTIM_MASK;
 		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
 		I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+		/* For EHL set latency optimization for PCS_DW1 lanes */
+		if (IS_ELKHARTLAKE(dev_priv)) {
+			tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+			tmp &= ~LATENCY_OPTIM_MASK;
+			tmp |= LATENCY_OPTIM_VAL(0);
+			I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+
+			tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+			tmp &= ~LATENCY_OPTIM_MASK;
+			tmp |= LATENCY_OPTIM_VAL(0x1);
+			I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+		}
 	}
 
 }
@@ -531,6 +544,14 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
 			I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
 		}
 	}
+
+	if (IS_ELKHARTLAKE(dev_priv)) {
+		for_each_dsi_port(port, intel_dsi->ports) {
+			tmp = I915_READ(ICL_DPHY_CHKN(port));
+			tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
+			I915_WRITE(ICL_DPHY_CHKN(port), tmp);
+		}
+	}
 }
 
 static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
@@ -1487,6 +1508,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 	intel_dsi_log_params(intel_dsi);
 }
 
+static void icl_dsi_add_properties(struct intel_connector *connector)
+{
+	u32 allowed_scalers;
+
+	allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
+			   BIT(DRM_MODE_SCALE_FULLSCREEN) |
+			   BIT(DRM_MODE_SCALE_CENTER);
+
+	drm_connector_attach_scaling_mode_property(&connector->base,
+						   allowed_scalers);
+
+	connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+	connector->base.display_info.panel_orientation =
+			intel_dsi_get_panel_orientation(connector);
+	drm_connector_init_panel_orientation_property(&connector->base,
+				connector->panel.fixed_mode->hdisplay,
+				connector->panel.fixed_mode->vdisplay);
+}
+
 void icl_dsi_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
@@ -1580,6 +1621,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	}
 
 	icl_dphy_param_init(intel_dsi);
+
+	icl_dsi_add_properties(intel_connector);
 	return;
 
 err:
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 30bd4e7..ab411d5 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -176,33 +176,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	new_crtc_state->data_rate[plane->id] =
 		intel_plane_data_rate(new_crtc_state, new_plane_state);
 
-	return intel_plane_atomic_calc_changes(old_crtc_state,
-					       &new_crtc_state->base,
-					       old_plane_state,
-					       &new_plane_state->base);
+	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
+					       old_plane_state, new_plane_state);
 }
 
-static int intel_plane_atomic_check(struct drm_plane *plane,
-				    struct drm_plane_state *new_plane_state)
+static struct intel_crtc *
+get_crtc_from_states(const struct intel_plane_state *old_plane_state,
+		     const struct intel_plane_state *new_plane_state)
 {
-	struct drm_atomic_state *state = new_plane_state->state;
-	const struct drm_plane_state *old_plane_state =
-		drm_atomic_get_old_plane_state(state, plane);
-	struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
-	const struct drm_crtc_state *old_crtc_state;
-	struct drm_crtc_state *new_crtc_state;
+	if (new_plane_state->base.crtc)
+		return to_intel_crtc(new_plane_state->base.crtc);
 
-	new_plane_state->visible = false;
+	if (old_plane_state->base.crtc)
+		return to_intel_crtc(old_plane_state->base.crtc);
+
+	return NULL;
+}
+
+static int intel_plane_atomic_check(struct drm_plane *_plane,
+				    struct drm_plane_state *_new_plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(_plane);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(_new_plane_state->state);
+	struct intel_plane_state *new_plane_state =
+		to_intel_plane_state(_new_plane_state);
+	const struct intel_plane_state *old_plane_state =
+		intel_atomic_get_old_plane_state(state, plane);
+	struct intel_crtc *crtc =
+		get_crtc_from_states(old_plane_state, new_plane_state);
+	const struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
+
+	new_plane_state->base.visible = false;
 	if (!crtc)
 		return 0;
 
-	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
-	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 
-	return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
-						   to_intel_crtc_state(new_crtc_state),
-						   to_intel_plane_state(old_plane_state),
-						   to_intel_plane_state(new_plane_state));
+	return intel_plane_atomic_check_with_state(old_crtc_state,
+						   new_crtc_state,
+						   old_plane_state,
+						   new_plane_state);
 }
 
 static struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 1437a87..cb7ef4f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -8,7 +8,6 @@
 
 #include <linux/types.h>
 
-struct drm_crtc_state;
 struct drm_plane;
 struct drm_property;
 struct intel_atomic_state;
@@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 					const struct intel_plane_state *old_plane_state,
 					struct intel_plane_state *intel_state);
 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
-				    struct drm_crtc_state *crtc_state,
+				    struct intel_crtc_state *crtc_state,
 				    const struct intel_plane_state *old_plane_state,
-				    struct drm_plane_state *plane_state);
+				    struct intel_plane_state *plane_state);
 
 #endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 840daff12..c8fd35a 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -72,6 +72,13 @@ struct dp_aud_n_m {
 	u16 n;
 };
 
+struct hdmi_aud_ncts {
+	int sample_rate;
+	int clock;
+	int n;
+	int cts;
+};
+
 /* Values according to DP 1.4 Table 2-104 */
 static const struct dp_aud_n_m dp_aud_n_m[] = {
 	{ 32000, LC_162M, 1024, 10125 },
@@ -148,12 +155,7 @@ static const struct {
 #define TMDS_594M 594000
 #define TMDS_593M 593407
 
-static const struct {
-	int sample_rate;
-	int clock;
-	int n;
-	int cts;
-} hdmi_aud_ncts[] = {
+static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
 	{ 32000, TMDS_296M, 5824, 421875 },
 	{ 32000, TMDS_297M, 3072, 222750 },
 	{ 32000, TMDS_593M, 5824, 843750 },
@@ -184,6 +186,49 @@ static const struct {
 	{ 192000, TMDS_594M, 24576, 594000 },
 };
 
+/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec*/
+/* HDMI N/CTS table for 10 bit deep color(30 bpp)*/
+#define TMDS_371M 371250
+#define TMDS_370M 370878
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
+	{ 32000, TMDS_370M, 5824, 527344 },
+	{ 32000, TMDS_371M, 6144, 556875 },
+	{ 44100, TMDS_370M, 8918, 585938 },
+	{ 44100, TMDS_371M, 4704, 309375 },
+	{ 88200, TMDS_370M, 17836, 585938 },
+	{ 88200, TMDS_371M, 9408, 309375 },
+	{ 176400, TMDS_370M, 35672, 585938 },
+	{ 176400, TMDS_371M, 18816, 309375 },
+	{ 48000, TMDS_370M, 11648, 703125 },
+	{ 48000, TMDS_371M, 5120, 309375 },
+	{ 96000, TMDS_370M, 23296, 703125 },
+	{ 96000, TMDS_371M, 10240, 309375 },
+	{ 192000, TMDS_370M, 46592, 703125 },
+	{ 192000, TMDS_371M, 20480, 309375 },
+};
+
+/* HDMI N/CTS table for 12 bit deep color(36 bpp)*/
+#define TMDS_445_5M 445500
+#define TMDS_445M 445054
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
+	{ 32000, TMDS_445M, 5824, 632813 },
+	{ 32000, TMDS_445_5M, 4096, 445500 },
+	{ 44100, TMDS_445M, 8918, 703125 },
+	{ 44100, TMDS_445_5M, 4704, 371250 },
+	{ 88200, TMDS_445M, 17836, 703125 },
+	{ 88200, TMDS_445_5M, 9408, 371250 },
+	{ 176400, TMDS_445M, 35672, 703125 },
+	{ 176400, TMDS_445_5M, 18816, 371250 },
+	{ 48000, TMDS_445M, 5824, 421875 },
+	{ 48000, TMDS_445_5M, 5120, 371250 },
+	{ 96000, TMDS_445M, 11648, 421875 },
+	{ 96000, TMDS_445_5M, 10240, 371250 },
+	{ 192000, TMDS_445M, 23296, 421875 },
+	{ 192000, TMDS_445_5M, 20480, 371250 },
+};
+
 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
 static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
 {
@@ -212,14 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
 static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
 				   int rate)
 {
-	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->base.adjusted_mode;
-	int i;
+	const struct hdmi_aud_ncts *hdmi_ncts_table;
+	int i, size;
 
-	for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
-		if (rate == hdmi_aud_ncts[i].sample_rate &&
-		    adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
-			return hdmi_aud_ncts[i].n;
+	if (crtc_state->pipe_bpp == 36) {
+		hdmi_ncts_table = hdmi_aud_ncts_36bpp;
+		size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
+	} else if (crtc_state->pipe_bpp == 30) {
+		hdmi_ncts_table = hdmi_aud_ncts_30bpp;
+		size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
+	} else {
+		hdmi_ncts_table = hdmi_aud_ncts_24bpp;
+		size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
+	}
+
+	for (i = 0; i < size; i++) {
+		if (rate == hdmi_ncts_table[i].sample_rate &&
+		    crtc_state->port_clock == hdmi_ncts_table[i].clock) {
+			return hdmi_ncts_table[i].n;
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c471088..0c98081 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1668,6 +1668,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		if (!child->device_type)
 			continue;
 
+		DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
+			      child->device_type);
+
 		/*
 		 * Copy as much as we know (sizeof) and is available
 		 * (child_dev_size) of the child device. Accessing the data must
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4e42cfa..0b7be638 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -42,6 +42,7 @@ enum intel_backlight_type {
 	INTEL_BACKLIGHT_DISPLAY_DDI,
 	INTEL_BACKLIGHT_DSI_DCS,
 	INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+	INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
 };
 
 struct edp_power_seq {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 753ac31..7b908e1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
 		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
 		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
 
+		bi->num_qgv_points = qi.num_points;
+
 		for (j = 0; j < qi.num_points; j++) {
 			const struct intel_qgv_point *sp = &qi.points[j];
 			int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
 			bi->deratedbw[j] = min(maxdebw,
 					       bw * 9 / 10); /* 90% */
 
-			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
 				      i, j, bi->num_planes, bi->deratedbw[j]);
 		}
 
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 {
 	int i;
 
-	/* Did we initialize the bw limits successfully? */
-	if (dev_priv->max_bw[0].num_planes == 0)
-		return UINT_MAX;
-
 	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
 		const struct intel_bw_info *bi =
 			&dev_priv->max_bw[i];
 
+		/*
+		 * Pcode will not expose all QGV points when
+		 * SAGV is forced to off/min/med/max.
+		 */
+		if (qgv_point >= bi->num_qgv_points)
+			return UINT_MAX;
+
 		if (num_planes >= bi->num_planes)
 			return bi->deratedbw[qgv_point];
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8993ab2..0b8b8ae 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1756,9 +1756,10 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 
 static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
 {
-	int ranges_24[] = { 312000, 552000, 648000 };
-	int ranges_19_38[] = { 307200, 556800, 652800 };
-	int *ranges;
+	static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
+	static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
+	const int *ranges;
+	int len, i;
 
 	switch (ref) {
 	default:
@@ -1766,19 +1767,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
 		/* fall through */
 	case 24000:
 		ranges = ranges_24;
+		len = ARRAY_SIZE(ranges_24);
 		break;
 	case 19200:
 	case 38400:
 		ranges = ranges_19_38;
+		len = ARRAY_SIZE(ranges_19_38);
 		break;
 	}
 
-	if (min_cdclk > ranges[1])
-		return ranges[2];
-	else if (min_cdclk > ranges[0])
-		return ranges[1];
-	else
-		return ranges[0];
+	for (i = 0; i < len; i++) {
+		if (min_cdclk <= ranges[i])
+			return ranges[i];
+	}
+
+	WARN_ON(min_cdclk > ranges[len - 1]);
+	return ranges[len - 1];
 }
 
 static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
@@ -1792,16 +1796,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 	default:
 		MISSING_CASE(cdclk);
 		/* fall through */
+	case 172800:
 	case 307200:
 	case 556800:
 	case 652800:
 		WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
 			dev_priv->cdclk.hw.ref != 38400);
 		break;
+	case 180000:
 	case 312000:
 	case 552000:
 	case 648000:
 		WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+		break;
+	case 192000:
+		WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+			dev_priv->cdclk.hw.ref != 38400 &&
+			dev_priv->cdclk.hw.ref != 24000);
+		break;
 	}
 
 	ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
@@ -1854,14 +1866,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
 	dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
 }
 
-static u8 icl_calc_voltage_level(int cdclk)
+static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
 {
-	if (cdclk > 556800)
-		return 2;
-	else if (cdclk > 312000)
-		return 1;
-	else
-		return 0;
+	if (IS_ELKHARTLAKE(dev_priv)) {
+		if (cdclk > 312000)
+			return 2;
+		else if (cdclk > 180000)
+			return 1;
+		else
+			return 0;
+	} else {
+		if (cdclk > 556800)
+			return 2;
+		else if (cdclk > 312000)
+			return 1;
+		else
+			return 0;
+	}
 }
 
 static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1912,7 +1933,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
 	 * at least what the CDCLK frequency requires.
 	 */
 	cdclk_state->voltage_level =
-		icl_calc_voltage_level(cdclk_state->cdclk);
+		icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
 }
 
 static void icl_init_cdclk(struct drm_i915_private *dev_priv)
@@ -1947,7 +1968,8 @@ static void icl_init_cdclk(struct drm_i915_private *dev_priv)
 	sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
 						     sanitized_state.cdclk);
 	sanitized_state.voltage_level =
-				icl_calc_voltage_level(sanitized_state.cdclk);
+				icl_calc_voltage_level(dev_priv,
+						       sanitized_state.cdclk);
 
 	icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
 }
@@ -1958,7 +1980,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
 
 	cdclk_state.cdclk = cdclk_state.bypass;
 	cdclk_state.vco = 0;
-	cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
+	cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
+							   cdclk_state.cdclk);
 
 	icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }
@@ -2549,7 +2572,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
 	state->cdclk.logical.vco = vco;
 	state->cdclk.logical.cdclk = cdclk;
 	state->cdclk.logical.voltage_level =
-		max(icl_calc_voltage_level(cdclk),
+		max(icl_calc_voltage_level(dev_priv, cdclk),
 		    cnl_compute_min_voltage_level(state));
 
 	if (!state->active_crtcs) {
@@ -2559,7 +2582,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
 		state->cdclk.actual.vco = vco;
 		state->cdclk.actual.cdclk = cdclk;
 		state->cdclk.actual.voltage_level =
-			icl_calc_voltage_level(cdclk);
+			icl_calc_voltage_level(dev_priv, cdclk);
 	} else {
 		state->cdclk.actual = state->cdclk.logical;
 	}
@@ -2594,7 +2617,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  */
 void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (IS_ELKHARTLAKE(dev_priv)) {
+		if (dev_priv->cdclk.hw.ref == 24000)
+			dev_priv->max_cdclk_freq = 552000;
+		else
+			dev_priv->max_cdclk_freq = 556800;
+	} else if (INTEL_GEN(dev_priv) >= 11) {
 		if (dev_priv->cdclk.hw.ref == 24000)
 			dev_priv->max_cdclk_freq = 648000;
 		else
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 841708d..d3d5244 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -183,9 +183,13 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
 				  enum port port)
 {
-	return !(I915_READ(ICL_PHY_MISC(port)) &
-		 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
-		(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+	/* The PHY C added by EHL has no PHY_MISC register */
+	if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C)
+		return I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT;
+	else
+		return !(I915_READ(ICL_PHY_MISC(port)) &
+			 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+			(I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
 }
 
 static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
@@ -260,6 +264,32 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
 	I915_WRITE(ICL_PORT_CL_DW10(port), val);
 }
 
+static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
+{
+	bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
+	bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
+	bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+	/*
+	 * VBT's 'dvo port' field for child devices references the DDI, not
+	 * the PHY.  So if combo PHY A is wired up to drive an external
+	 * display, we should see a child device present on PORT_D and
+	 * nothing on PORT_A and no DSI.
+	 */
+	if (ddi_d_present && !ddi_a_present && !dsi_present)
+		return val | ICL_PHY_MISC_MUX_DDID;
+
+	/*
+	 * If we encounter a VBT that claims to have an external display on
+	 * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+	 * in the log and let the internal display win.
+	 */
+	if (ddi_d_present)
+		DRM_ERROR("VBT claims to have both internal and external displays on PHY A.  Configuring for internal.\n");
+
+	return val & ~ICL_PHY_MISC_MUX_DDID;
+}
+
 static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
 {
 	enum port port;
@@ -273,10 +303,29 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
 			continue;
 		}
 
+		/*
+		 * Although EHL adds a combo PHY C, there's no PHY_MISC
+		 * register for it and no need to program the
+		 * DE_IO_COMP_PWR_DOWN setting on PHY C.
+		 */
+		if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C)
+			goto skip_phy_misc;
+
+		/*
+		 * EHL's combo PHY A can be hooked up to either an external
+		 * display (via DDI-D) or an internal display (via DDI-A or
+		 * the DSI DPHY).  This is a motherboard design decision that
+		 * can't be changed on the fly, so initialize the PHY's mux
+		 * based on whether our VBT indicates the presence of any
+		 * "internal" child devices.
+		 */
 		val = I915_READ(ICL_PHY_MISC(port));
+		if (IS_ELKHARTLAKE(dev_priv) && port == PORT_A)
+			val = ehl_combo_phy_a_mux(dev_priv, val);
 		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
 		I915_WRITE(ICL_PHY_MISC(port), val);
 
+skip_phy_misc:
 		cnl_set_procmon_ref_values(dev_priv, port);
 
 		if (port == PORT_A) {
@@ -307,10 +356,19 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 			DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
 				 port_name(port));
 
+		/*
+		 * Although EHL adds a combo PHY C, there's no PHY_MISC
+		 * register for it and no need to program the
+		 * DE_IO_COMP_PWR_DOWN setting on PHY C.
+		 */
+		if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C)
+			goto skip_phy_misc;
+
 		val = I915_READ(ICL_PHY_MISC(port));
 		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
 		I915_WRITE(ICL_PHY_MISC(port), val);
 
+skip_phy_misc:
 		val = I915_READ(ICL_PORT_COMP_DW0(port));
 		val &= ~COMP_INIT;
 		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a17..30e4860 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -45,6 +45,7 @@
 #include "intel_lspcon.h"
 #include "intel_panel.h"
 #include "intel_psr.h"
+#include "intel_tc.h"
 #include "intel_vdsc.h"
 
 struct ddi_buf_trans {
@@ -846,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 }
 
 static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-			int type, int rate, int *n_entries)
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+			int *n_entries)
 {
 	if (type == INTEL_OUTPUT_HDMI) {
 		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
@@ -872,7 +873,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
 	if (INTEL_GEN(dev_priv) >= 11) {
 		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+			icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
 						0, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2231,7 +2232,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 
 	if (INTEL_GEN(dev_priv) >= 11) {
 		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+			icl_get_combo_buf_trans(dev_priv, encoder->type,
 						intel_dp->link_rate, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2420,8 +2421,8 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 	u32 n_entries, val;
 	int ln;
 
-	ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-						   rate, &n_entries);
+	ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+						   &n_entries);
 	if (!ddi_translations)
 		return;
 
@@ -2995,25 +2996,22 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	enum port port = intel_dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	u32 ln0, ln1, lane_info;
+	u32 ln0, ln1, lane_mask;
 
-	if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+	if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
 		return;
 
 	ln0 = I915_READ(MG_DP_MODE(0, port));
 	ln1 = I915_READ(MG_DP_MODE(1, port));
 
-	switch (intel_dig_port->tc_type) {
-	case TC_PORT_TYPEC:
+	switch (intel_dig_port->tc_mode) {
+	case TC_PORT_DP_ALT:
 		ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 		ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 
-		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
-			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
-			    DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+		lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
 
-		switch (lane_info) {
+		switch (lane_mask) {
 		case 0x1:
 		case 0x4:
 			break;
@@ -3038,7 +3036,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 			       MG_DP_MODE_CFG_DP_X2_MODE;
 			break;
 		default:
-			MISSING_CASE(lane_info);
+			MISSING_CASE(lane_mask);
 		}
 		break;
 
@@ -3048,7 +3046,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
 		break;
 
 	default:
-		MISSING_CASE(intel_dig_port->tc_type);
+		MISSING_CASE(intel_dig_port->tc_mode);
 		return;
 	}
 
@@ -3123,7 +3121,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	intel_ddi_clk_select(encoder, crtc_state);
 
-	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+	if (!intel_port_is_tc(dev_priv, port) ||
+	    dig_port->tc_mode != TC_PORT_TBT_ALT)
+		intel_display_power_get(dev_priv,
+					dig_port->ddi_io_power_domain);
 
 	icl_program_mg_dp_mode(dig_port);
 	icl_disable_phy_clock_gating(dig_port);
@@ -3305,8 +3306,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	intel_edp_panel_vdd_on(intel_dp);
 	intel_edp_panel_off(intel_dp);
 
-	intel_display_power_put_unchecked(dev_priv,
-					  dig_port->ddi_io_power_domain);
+	if (!intel_port_is_tc(dev_priv, encoder->port) ||
+	    dig_port->tc_mode != TC_PORT_TBT_ALT)
+		intel_display_power_put_unchecked(dev_priv,
+						  dig_port->ddi_io_power_domain);
 
 	intel_ddi_clk_disable(encoder);
 }
@@ -3601,6 +3604,8 @@ static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
 	u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
 	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
 
+	WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+
 	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
 	switch (pipe_config->lane_count) {
 	case 1:
@@ -3621,16 +3626,43 @@ static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
 }
 
 static void
+intel_ddi_update_prepare(struct intel_atomic_state *state,
+			 struct intel_encoder *encoder,
+			 struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state =
+		crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
+	int required_lanes = crtc_state ? crtc_state->lane_count : 1;
+
+	WARN_ON(crtc && crtc->active);
+
+	intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
+	if (crtc_state && crtc_state->base.active)
+		intel_update_active_dpll(state, crtc, encoder);
+}
+
+static void
+intel_ddi_update_complete(struct intel_atomic_state *state,
+			  struct intel_encoder *encoder,
+			  struct intel_crtc *crtc)
+{
+	intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
+}
+
+static void
 intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
 			 const struct intel_crtc_state *crtc_state,
 			 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port);
 	enum port port = encoder->port;
 
-	if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_port_is_tc(dev_priv, encoder->port))
+	if (is_tc_port)
+		intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+
+	if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
 		intel_display_power_get(dev_priv,
 					intel_ddi_main_link_aux_domain(dig_port));
 
@@ -3642,8 +3674,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
 	 * Program the lane count for static/dynamic connections on Type-C ports.
 	 * Skip this step for TBT.
 	 */
-	if (dig_port->tc_type == TC_PORT_UNKNOWN ||
-	    dig_port->tc_type == TC_PORT_TBT)
+	if (dig_port->tc_mode == TC_PORT_TBT_ALT)
 		return;
 
 	intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
@@ -3656,11 +3687,14 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port);
 
-	if (intel_crtc_has_dp_encoder(crtc_state) ||
-	    intel_port_is_tc(dev_priv, encoder->port))
+	if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
 		intel_display_power_put_unchecked(dev_priv,
 						  intel_ddi_main_link_aux_domain(dig_port));
+
+	if (is_tc_port)
+		intel_tc_port_put_link(dig_port);
 }
 
 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3737,7 +3771,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-	struct intel_digital_port *intel_dig_port;
 	u32 temp, flags = 0;
 
 	/* XXX: DSI transcoder paranoia */
@@ -3776,7 +3809,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
 	case TRANS_DDI_MODE_SELECT_HDMI:
 		pipe_config->has_hdmi_sink = true;
-		intel_dig_port = enc_to_dig_port(&encoder->base);
 
 		pipe_config->infoframes.enable |=
 			intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -3914,49 +3946,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
-static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
-{
-	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
-	intel_dp_encoder_suspend(encoder);
-
-	/*
-	 * TODO: disconnect also from USB DP alternate mode once we have a
-	 * way to handle the modeset restore in that mode during resume
-	 * even if the sink has disappeared while being suspended.
-	 */
-	if (dig_port->tc_legacy_port)
-		icl_tc_phy_disconnect(i915, dig_port);
-}
-
-static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
-{
-	struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
-	struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
-
-	if (intel_port_is_tc(i915, dig_port->base.port))
-		intel_digital_port_connected(&dig_port->base);
-
-	intel_dp_encoder_reset(drm_encoder);
-}
-
 static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
-	struct drm_i915_private *i915 = to_i915(encoder->dev);
 
 	intel_dp_encoder_flush_work(encoder);
 
-	if (intel_port_is_tc(i915, dig_port->base.port))
-		icl_tc_phy_disconnect(i915, dig_port);
-
 	drm_encoder_cleanup(encoder);
 	kfree(dig_port);
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-	.reset = intel_ddi_encoder_reset,
+	.reset = intel_dp_encoder_reset,
 	.destroy = intel_ddi_encoder_destroy,
 };
 
@@ -4242,7 +4243,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->update_pipe = intel_ddi_update_pipe;
 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 	intel_encoder->get_config = intel_ddi_get_config;
-	intel_encoder->suspend = intel_ddi_encoder_suspend;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
 	intel_encoder->get_power_domains = intel_ddi_get_power_domains;
 	intel_encoder->type = INTEL_OUTPUT_DDI;
 	intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4261,9 +4262,15 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
 	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
-	intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
-					 !port_info->supports_typec_usb &&
-					 !port_info->supports_tbt;
+	if (intel_port_is_tc(dev_priv, port)) {
+		bool is_legacy = !port_info->supports_typec_usb &&
+				 !port_info->supports_tbt;
+
+		intel_tc_port_init(intel_dig_port, is_legacy);
+
+		intel_encoder->update_prepare = intel_ddi_update_prepare;
+		intel_encoder->update_complete = intel_ddi_update_complete;
+	}
 
 	switch (port) {
 	case PORT_A:
@@ -4324,9 +4331,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
 	intel_infoframe_init(intel_dig_port);
 
-	if (intel_port_is_tc(dev_priv, port))
-		intel_digital_port_connected(intel_encoder);
-
 	return;
 
 err:
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8592a7d..f070818 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -78,6 +78,7 @@
 #include "intel_quirks.h"
 #include "intel_sideband.h"
 #include "intel_sprite.h"
+#include "intel_tc.h"
 
 /* Primary plane formats for gen <= 3 */
 static const u32 i8xx_primary_formats[] = {
@@ -515,9 +516,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 }
 
 static bool
-needs_modeset(const struct drm_crtc_state *state)
+needs_modeset(const struct intel_crtc_state *state)
 {
-	return drm_atomic_crtc_needs_modeset(state);
+	return drm_atomic_crtc_needs_modeset(&state->base);
 }
 
 /*
@@ -1839,7 +1840,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 		/* FIXME: assert CPU port conditions for SNB+ */
 	}
 
-	trace_intel_pipe_enable(dev_priv, pipe);
+	trace_intel_pipe_enable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
@@ -1880,7 +1881,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	 */
 	assert_planes_disabled(crtc);
 
-	trace_intel_pipe_disable(dev_priv, pipe);
+	trace_intel_pipe_disable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
 	val = I915_READ(reg);
@@ -3715,10 +3716,27 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
 	return 0;
 }
 
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		return i9xx_plane == PLANE_B;
+	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+		return false;
+	else if (IS_GEN(dev_priv, 4))
+		return i9xx_plane == PLANE_C;
+	else
+		return i9xx_plane == PLANE_B ||
+			i9xx_plane == PLANE_C;
+}
+
 static int
 i9xx_plane_check(struct intel_crtc_state *crtc_state,
 		 struct intel_plane_state *plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	int ret;
 
 	ret = chv_plane_check_rotation(plane_state);
@@ -3729,7 +3747,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
 						  &crtc_state->base,
 						  DRM_PLANE_HELPER_NO_SCALING,
 						  DRM_PLANE_HELPER_NO_SCALING,
-						  false, true);
+						  i9xx_plane_has_windowing(plane),
+						  true);
 	if (ret)
 		return ret;
 
@@ -3758,6 +3777,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	u32 linear_offset;
 	int x = plane_state->color_plane[0].x;
 	int y = plane_state->color_plane[0].y;
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	int crtc_w = drm_rect_width(&plane_state->base.dst);
+	int crtc_h = drm_rect_height(&plane_state->base.dst);
 	unsigned long irqflags;
 	u32 dspaddr_offset;
 	u32 dspcntr;
@@ -3776,18 +3799,18 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
 
 	if (INTEL_GEN(dev_priv) < 4) {
-		/* pipesrc and dspsize control the size that is scaled from,
-		 * which should always be the user's requested size.
+		/*
+		 * PLANE_A doesn't actually have a full window
+		 * generator but let's assume we still need to
+		 * program whatever is there.
 		 */
-		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
 		I915_WRITE_FW(DSPSIZE(i9xx_plane),
-			      ((crtc_state->pipe_src_h - 1) << 16) |
-			      (crtc_state->pipe_src_w - 1));
+			      ((crtc_h - 1) << 16) | (crtc_w - 1));
 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
-		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
 		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
-			      ((crtc_state->pipe_src_h - 1) << 16) |
-			      (crtc_state->pipe_src_w - 1));
+			      ((crtc_h - 1) << 16) | (crtc_w - 1));
 		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
 	}
 
@@ -3950,10 +3973,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_ARGB8888:
 		return PLANE_CTL_FORMAT_XRGB_8888;
+	case DRM_FORMAT_XBGR2101010:
+		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
 	case DRM_FORMAT_XRGB2101010:
 		return PLANE_CTL_FORMAT_XRGB_2101010;
-	case DRM_FORMAT_XBGR2101010:
-		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
 	case DRM_FORMAT_XBGR16161616F:
 	case DRM_FORMAT_ABGR16161616F:
 		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
@@ -5796,7 +5819,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
 	if (!old_crtc_state->ips_enabled)
 		return false;
 
-	if (needs_modeset(&new_crtc_state->base))
+	if (needs_modeset(new_crtc_state))
 		return true;
 
 	/*
@@ -5823,7 +5846,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 	if (!new_crtc_state->ips_enabled)
 		return false;
 
-	if (needs_modeset(&new_crtc_state->base))
+	if (needs_modeset(new_crtc_state))
 		return true;
 
 	/*
@@ -5877,13 +5900,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *old_state = old_crtc_state->base.state;
+	struct drm_atomic_state *state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
-		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
 						crtc);
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_primary_state =
-		drm_atomic_get_old_plane_state(old_state, primary);
+		drm_atomic_get_old_plane_state(state, primary);
 
 	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
@@ -5895,12 +5918,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 
 	if (old_primary_state) {
 		struct drm_plane_state *new_primary_state =
-			drm_atomic_get_new_plane_state(old_state, primary);
+			drm_atomic_get_new_plane_state(state, primary);
 
 		intel_fbc_post_update(crtc);
 
 		if (new_primary_state->visible &&
-		    (needs_modeset(&pipe_config->base) ||
+		    (needs_modeset(pipe_config) ||
 		     !old_primary_state->visible))
 			intel_post_enable_primary(&crtc->base, pipe_config);
 	}
@@ -5920,20 +5943,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_atomic_state *old_state = old_crtc_state->base.state;
+	struct drm_atomic_state *state = old_crtc_state->base.state;
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_primary_state =
-		drm_atomic_get_old_plane_state(old_state, primary);
-	bool modeset = needs_modeset(&pipe_config->base);
-	struct intel_atomic_state *old_intel_state =
-		to_intel_atomic_state(old_state);
+		drm_atomic_get_old_plane_state(state, primary);
+	bool modeset = needs_modeset(pipe_config);
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(state);
 
 	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
 		hsw_disable_ips(old_crtc_state);
 
 	if (old_primary_state) {
 		struct intel_plane_state *new_primary_state =
-			intel_atomic_get_new_plane_state(old_intel_state,
+			intel_atomic_get_new_plane_state(intel_state,
 							 to_intel_plane(primary));
 
 		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
@@ -5984,7 +6007,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
 	 * watermark programming here.
 	 */
-	if (needs_modeset(&pipe_config->base))
+	if (needs_modeset(pipe_config))
 		return;
 
 	/*
@@ -6002,7 +6025,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 * us to.
 	 */
 	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state,
+		dev_priv->display.initial_watermarks(intel_state,
 						     pipe_config);
 	else if (pipe_config->update_wm_pre)
 		intel_update_watermarks(crtc);
@@ -6036,19 +6059,111 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
 	intel_frontbuffer_flip(dev_priv, fb_bits);
 }
 
-static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+/*
+ * intel_connector_primary_encoder - get the primary encoder for a connector
+ * @connector: connector for which to return the encoder
+ *
+ * Returns the primary encoder for a connector. There is a 1:1 mapping from
+ * all connectors to their encoder, except for DP-MST connectors which have
+ * both a virtual and a primary encoder. These DP-MST primary encoders can be
+ * pointed to by as many DP-MST connectors as there are pipes.
+ */
+static struct intel_encoder *
+intel_connector_primary_encoder(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder;
+
+	if (connector->mst_port)
+		return &dp_to_dig_port(connector->mst_port)->base;
+
+	encoder = intel_attached_encoder(&connector->base);
+	WARN_ON(!encoder);
+
+	return encoder;
+}
+
+static bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+			      const struct drm_connector_state *old_conn_state,
+			      const struct drm_connector_state *new_conn_state)
+{
+	struct intel_crtc *old_crtc = old_conn_state->crtc ?
+				      to_intel_crtc(old_conn_state->crtc) : NULL;
+	struct intel_crtc *new_crtc = new_conn_state->crtc ?
+				      to_intel_crtc(new_conn_state->crtc) : NULL;
+
+	return new_crtc != old_crtc ||
+	       (new_crtc &&
+		needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
+}
+
+static void intel_encoders_update_prepare(struct intel_atomic_state *state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *new_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_oldnew_connector_in_state(&state->base, conn,
+					   old_conn_state, new_conn_state, i) {
+		struct intel_encoder *encoder;
+		struct intel_crtc *crtc;
+
+		if (!intel_connector_needs_modeset(state,
+						   old_conn_state,
+						   new_conn_state))
+			continue;
+
+		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+		if (!encoder->update_prepare)
+			continue;
+
+		crtc = new_conn_state->crtc ?
+			to_intel_crtc(new_conn_state->crtc) : NULL;
+		encoder->update_prepare(state, encoder, crtc);
+	}
+}
+
+static void intel_encoders_update_complete(struct intel_atomic_state *state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector_state *new_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_oldnew_connector_in_state(&state->base, conn,
+					   old_conn_state, new_conn_state, i) {
+		struct intel_encoder *encoder;
+		struct intel_crtc *crtc;
+
+		if (!intel_connector_needs_modeset(state,
+						   old_conn_state,
+						   new_conn_state))
+			continue;
+
+		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+		if (!encoder->update_complete)
+			continue;
+
+		crtc = new_conn_state->crtc ?
+			to_intel_crtc(new_conn_state->crtc) : NULL;
+		encoder->update_complete(state, encoder, crtc);
+	}
+}
+
+static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
 					  struct intel_crtc_state *crtc_state,
-					  struct drm_atomic_state *old_state)
+					  struct intel_atomic_state *state)
 {
 	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
-		if (conn_state->crtc != crtc)
+		if (conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->pre_pll_enable)
@@ -6056,19 +6171,19 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+static void intel_encoders_pre_enable(struct intel_crtc *crtc,
 				      struct intel_crtc_state *crtc_state,
-				      struct drm_atomic_state *old_state)
+				      struct intel_atomic_state *state)
 {
 	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
-		if (conn_state->crtc != crtc)
+		if (conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->pre_enable)
@@ -6076,19 +6191,19 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_enable(struct drm_crtc *crtc,
+static void intel_encoders_enable(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state,
-				  struct drm_atomic_state *old_state)
+				  struct intel_atomic_state *state)
 {
 	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
-		if (conn_state->crtc != crtc)
+		if (conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->enable)
@@ -6097,19 +6212,19 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_disable(struct drm_crtc *crtc,
+static void intel_encoders_disable(struct intel_crtc *crtc,
 				   struct intel_crtc_state *old_crtc_state,
-				   struct drm_atomic_state *old_state)
+				   struct intel_atomic_state *state)
 {
 	struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
-		if (old_conn_state->crtc != crtc)
+		if (old_conn_state->crtc != &crtc->base)
 			continue;
 
 		intel_opregion_notify_encoder(encoder, false);
@@ -6118,19 +6233,19 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_post_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_disable(struct intel_crtc *crtc,
 					struct intel_crtc_state *old_crtc_state,
-					struct drm_atomic_state *old_state)
+					struct intel_atomic_state *state)
 {
 	struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
-		if (old_conn_state->crtc != crtc)
+		if (old_conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->post_disable)
@@ -6138,19 +6253,19 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
 					    struct intel_crtc_state *old_crtc_state,
-					    struct drm_atomic_state *old_state)
+					    struct intel_atomic_state *state)
 {
 	struct drm_connector_state *old_conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(old_conn_state->best_encoder);
 
-		if (old_conn_state->crtc != crtc)
+		if (old_conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->post_pll_disable)
@@ -6158,19 +6273,19 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
 	}
 }
 
-static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+static void intel_encoders_update_pipe(struct intel_crtc *crtc,
 				       struct intel_crtc_state *crtc_state,
-				       struct drm_atomic_state *old_state)
+				       struct intel_atomic_state *state)
 {
 	struct drm_connector_state *conn_state;
 	struct drm_connector *conn;
 	int i;
 
-	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
 		struct intel_encoder *encoder =
 			to_intel_encoder(conn_state->best_encoder);
 
-		if (conn_state->crtc != crtc)
+		if (conn_state->crtc != &crtc->base)
 			continue;
 
 		if (encoder->update_pipe)
@@ -6187,15 +6302,13 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat
 }
 
 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
-				 struct drm_atomic_state *old_state)
+				 struct intel_atomic_state *state)
 {
 	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	struct intel_atomic_state *old_intel_state =
-		to_intel_atomic_state(old_state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -6231,7 +6344,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_crtc->active = true;
 
-	intel_encoders_pre_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
 
 	if (pipe_config->has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -6255,16 +6368,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_disable_primary_plane(pipe_config);
 
 	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+		dev_priv->display.initial_watermarks(state, pipe_config);
 	intel_enable_pipe(pipe_config);
 
 	if (pipe_config->has_pch_encoder)
-		ironlake_pch_enable(old_intel_state, pipe_config);
+		ironlake_pch_enable(state, pipe_config);
 
 	assert_vblank_disabled(crtc);
 	intel_crtc_vblank_on(pipe_config);
 
-	intel_encoders_enable(crtc, pipe_config, old_state);
+	intel_encoders_enable(intel_crtc, pipe_config, state);
 
 	if (HAS_PCH_CPT(dev_priv))
 		cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -6317,26 +6430,24 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 }
 
 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
-				struct drm_atomic_state *old_state)
+				struct intel_atomic_state *state)
 {
 	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-	struct intel_atomic_state *old_intel_state =
-		to_intel_atomic_state(old_state);
 	bool psl_clkgate_wa;
 
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
 
 	if (pipe_config->shared_dpll)
 		intel_enable_shared_dpll(pipe_config);
 
-	intel_encoders_pre_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
 
 	if (intel_crtc_has_dp_encoder(pipe_config))
 		intel_dp_set_m_n(pipe_config, M1_N1);
@@ -6394,7 +6505,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 		intel_ddi_enable_transcoder_func(pipe_config);
 
 	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+		dev_priv->display.initial_watermarks(state, pipe_config);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_pipe_mbus_enable(intel_crtc);
@@ -6404,7 +6515,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 		intel_enable_pipe(pipe_config);
 
 	if (pipe_config->has_pch_encoder)
-		lpt_pch_enable(old_intel_state, pipe_config);
+		lpt_pch_enable(state, pipe_config);
 
 	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
 		intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -6412,7 +6523,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	assert_vblank_disabled(crtc);
 	intel_crtc_vblank_on(pipe_config);
 
-	intel_encoders_enable(crtc, pipe_config, old_state);
+	intel_encoders_enable(intel_crtc, pipe_config, state);
 
 	if (psl_clkgate_wa) {
 		intel_wait_for_vblank(dev_priv, pipe);
@@ -6444,7 +6555,7 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 }
 
 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
-				  struct drm_atomic_state *old_state)
+				  struct intel_atomic_state *state)
 {
 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
@@ -6460,7 +6571,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
-	intel_encoders_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_disable(intel_crtc, old_crtc_state, state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -6472,7 +6583,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (old_crtc_state->has_pch_encoder)
 		ironlake_fdi_disable(crtc);
 
-	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
 
 	if (old_crtc_state->has_pch_encoder) {
 		ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -6503,14 +6614,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
 }
 
 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
-				 struct drm_atomic_state *old_state)
+				 struct intel_atomic_state *state)
 {
 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 
-	intel_encoders_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_disable(intel_crtc, old_crtc_state, state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -6532,9 +6643,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	else
 		ironlake_pfit_disable(old_crtc_state);
 
-	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
 
-	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
 }
 
 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -6614,6 +6725,25 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
 enum intel_display_power_domain
 intel_aux_power_domain(struct intel_digital_port *dig_port)
 {
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+
+	if (intel_port_is_tc(dev_priv, dig_port->base.port) &&
+	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
+		switch (dig_port->aux_ch) {
+		case AUX_CH_C:
+			return POWER_DOMAIN_AUX_TBT1;
+		case AUX_CH_D:
+			return POWER_DOMAIN_AUX_TBT2;
+		case AUX_CH_E:
+			return POWER_DOMAIN_AUX_TBT3;
+		case AUX_CH_F:
+			return POWER_DOMAIN_AUX_TBT4;
+		default:
+			MISSING_CASE(dig_port->aux_ch);
+			return POWER_DOMAIN_AUX_TBT1;
+		}
+	}
+
 	switch (dig_port->aux_ch) {
 	case AUX_CH_A:
 		return POWER_DOMAIN_AUX_A;
@@ -6633,14 +6763,13 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
 	}
 }
 
-static u64 get_crtc_power_domains(struct drm_crtc *crtc,
+static u64 get_crtc_power_domains(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_encoder *encoder;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
+	enum pipe pipe = crtc->pipe;
 	u64 mask;
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
@@ -6669,16 +6798,15 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 }
 
 static u64
-modeset_get_crtc_power_domains(struct drm_crtc *crtc,
+modeset_get_crtc_power_domains(struct intel_crtc *crtc,
 			       struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum intel_display_power_domain domain;
 	u64 domains, new_domains, old_domains;
 
-	old_domains = intel_crtc->enabled_power_domains;
-	intel_crtc->enabled_power_domains = new_domains =
+	old_domains = crtc->enabled_power_domains;
+	crtc->enabled_power_domains = new_domains =
 		get_crtc_power_domains(crtc, crtc_state);
 
 	domains = new_domains & ~old_domains;
@@ -6699,10 +6827,8 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
 }
 
 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
-				   struct drm_atomic_state *old_state)
+				   struct intel_atomic_state *state)
 {
-	struct intel_atomic_state *old_intel_state =
-		to_intel_atomic_state(old_state);
 	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6729,7 +6855,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		chv_prepare_pll(intel_crtc, pipe_config);
@@ -6739,7 +6865,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 		vlv_enable_pll(intel_crtc, pipe_config);
 	}
 
-	intel_encoders_pre_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
 
 	i9xx_pfit_enable(pipe_config);
 
@@ -6748,14 +6874,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
 	/* update DSPCNTR to configure gamma for pipe bottom color */
 	intel_disable_primary_plane(pipe_config);
 
-	dev_priv->display.initial_watermarks(old_intel_state,
-					     pipe_config);
+	dev_priv->display.initial_watermarks(state, pipe_config);
 	intel_enable_pipe(pipe_config);
 
 	assert_vblank_disabled(crtc);
 	intel_crtc_vblank_on(pipe_config);
 
-	intel_encoders_enable(crtc, pipe_config, old_state);
+	intel_encoders_enable(intel_crtc, pipe_config, state);
 }
 
 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
@@ -6768,10 +6893,8 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
 }
 
 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
-			     struct drm_atomic_state *old_state)
+			     struct intel_atomic_state *state)
 {
-	struct intel_atomic_state *old_intel_state =
-		to_intel_atomic_state(old_state);
 	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6796,7 +6919,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (!IS_GEN(dev_priv, 2))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	intel_encoders_pre_enable(crtc, pipe_config, old_state);
+	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
 
 	i9xx_enable_pll(intel_crtc, pipe_config);
 
@@ -6808,7 +6931,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 	intel_disable_primary_plane(pipe_config);
 
 	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state,
+		dev_priv->display.initial_watermarks(state,
 						     pipe_config);
 	else
 		intel_update_watermarks(intel_crtc);
@@ -6817,7 +6940,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 	assert_vblank_disabled(crtc);
 	intel_crtc_vblank_on(pipe_config);
 
-	intel_encoders_enable(crtc, pipe_config, old_state);
+	intel_encoders_enable(intel_crtc, pipe_config, state);
 }
 
 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@@ -6836,7 +6959,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 }
 
 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
-			      struct drm_atomic_state *old_state)
+			      struct intel_atomic_state *state)
 {
 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
@@ -6851,7 +6974,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	if (IS_GEN(dev_priv, 2))
 		intel_wait_for_vblank(dev_priv, pipe);
 
-	intel_encoders_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_disable(intel_crtc, old_crtc_state, state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -6860,7 +6983,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 
 	i9xx_pfit_disable(old_crtc_state);
 
-	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
 
 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev_priv))
@@ -6871,7 +6994,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 			i9xx_disable_pll(old_crtc_state);
 	}
 
-	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
 
 	if (!IS_GEN(dev_priv, 2))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6925,7 +7048,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
 
 	WARN_ON(IS_ERR(crtc_state) || ret);
 
-	dev_priv->display.crtc_disable(crtc_state, state);
+	dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
 
 	drm_atomic_state_put(state);
 
@@ -6988,7 +7111,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
 
 /* Cross check the actual hw state with our own modeset state tracking (and its
  * internal consistency). */
-static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 					 struct drm_connector_state *conn_state)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -7006,7 +7129,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
 		if (!crtc_state)
 			return;
 
-		I915_STATE_WARN(!crtc_state->active,
+		I915_STATE_WARN(!crtc_state->base.active,
 		      "connector is active, but attached crtc isn't\n");
 
 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -7018,7 +7141,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
 			"attached encoder crtc differs from connector crtc\n");
 	} else {
-		I915_STATE_WARN(crtc_state && crtc_state->active,
+		I915_STATE_WARN(crtc_state && crtc_state->base.active,
 			"attached crtc is active, but connector isn't\n");
 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
 			"best encoder set without crtc!\n");
@@ -9484,6 +9607,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 				       struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(crtc_state->base.state);
 	const struct intel_limit *limit;
 	int refclk = 120000;
 
@@ -9525,7 +9650,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 
 	ironlake_compute_dpll(crtc, crtc_state, NULL);
 
-	if (!intel_get_shared_dpll(crtc_state, NULL)) {
+	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
 			      pipe_name(crtc->pipe));
 		return -EINVAL;
@@ -9906,7 +10031,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 		struct intel_encoder *encoder =
 			intel_get_crtc_new_encoder(state, crtc_state);
 
-		if (!intel_get_shared_dpll(crtc_state, encoder)) {
+		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
 				      pipe_name(crtc->pipe));
 			return -EINVAL;
@@ -9936,22 +10061,36 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
 				enum port port,
 				struct intel_crtc_state *pipe_config)
 {
+	enum icl_port_dpll_id port_dpll_id;
 	enum intel_dpll_id id;
 	u32 temp;
 
-	/* TODO: TBT pll not implemented. */
 	if (intel_port_is_combophy(dev_priv, port)) {
 		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
 		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
 	} else if (intel_port_is_tc(dev_priv, port)) {
-		id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+		if (clk_sel == DDI_CLK_SEL_MG) {
+			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+								    port));
+			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+		} else {
+			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+			id = DPLL_ID_ICL_TBTPLL;
+			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+		}
 	} else {
 		WARN(1, "Invalid port %x\n", port);
 		return;
 	}
 
-	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+	pipe_config->icl_port_dplls[port_dpll_id].pll =
+		intel_get_shared_dpll_by_id(dev_priv, id);
+
+	icl_set_active_port_dpll(pipe_config, port_dpll_id);
 }
 
 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -11297,7 +11436,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
  *
  * Returns true or false.
  */
-static bool intel_wm_need_update(struct intel_plane_state *cur,
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
 				 struct intel_plane_state *new)
 {
 	/* Update watermarks on tiling or size changes. */
@@ -11329,33 +11468,28 @@ static bool needs_scaling(const struct intel_plane_state *state)
 }
 
 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
-				    struct drm_crtc_state *crtc_state,
+				    struct intel_crtc_state *crtc_state,
 				    const struct intel_plane_state *old_plane_state,
-				    struct drm_plane_state *plane_state)
+				    struct intel_plane_state *plane_state)
 {
-	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
-	struct drm_crtc *crtc = crtc_state->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *plane = to_intel_plane(plane_state->plane);
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	bool mode_changed = needs_modeset(crtc_state);
 	bool was_crtc_enabled = old_crtc_state->base.active;
-	bool is_crtc_enabled = crtc_state->active;
+	bool is_crtc_enabled = crtc_state->base.active;
 	bool turn_off, turn_on, visible, was_visible;
-	struct drm_framebuffer *fb = plane_state->fb;
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	int ret;
 
 	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
-		ret = skl_update_scaler_plane(
-			to_intel_crtc_state(crtc_state),
-			to_intel_plane_state(plane_state));
+		ret = skl_update_scaler_plane(crtc_state, plane_state);
 		if (ret)
 			return ret;
 	}
 
 	was_visible = old_plane_state->base.visible;
-	visible = plane_state->visible;
+	visible = plane_state->base.visible;
 
 	if (!was_crtc_enabled && WARN_ON(was_visible))
 		was_visible = false;
@@ -11371,22 +11505,22 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 	 * only combine the results from all planes in the current place?
 	 */
 	if (!is_crtc_enabled) {
-		plane_state->visible = visible = false;
-		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
-		to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
+		plane_state->base.visible = visible = false;
+		crtc_state->active_planes &= ~BIT(plane->id);
+		crtc_state->data_rate[plane->id] = 0;
 	}
 
 	if (!was_visible && !visible)
 		return 0;
 
 	if (fb != old_plane_state->base.fb)
-		pipe_config->fb_changed = true;
+		crtc_state->fb_changed = true;
 
 	turn_off = was_visible && (!visible || mode_changed);
 	turn_on = visible && (!was_visible || mode_changed);
 
 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
-			 intel_crtc->base.base.id, intel_crtc->base.name,
+			 crtc->base.base.id, crtc->base.name,
 			 plane->base.base.id, plane->base.name,
 			 fb ? fb->base.id : -1);
 
@@ -11397,29 +11531,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 
 	if (turn_on) {
 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-			pipe_config->update_wm_pre = true;
+			crtc_state->update_wm_pre = true;
 
 		/* must disable cxsr around plane enable/disable */
 		if (plane->id != PLANE_CURSOR)
-			pipe_config->disable_cxsr = true;
+			crtc_state->disable_cxsr = true;
 	} else if (turn_off) {
 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-			pipe_config->update_wm_post = true;
+			crtc_state->update_wm_post = true;
 
 		/* must disable cxsr around plane enable/disable */
 		if (plane->id != PLANE_CURSOR)
-			pipe_config->disable_cxsr = true;
-	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
-					to_intel_plane_state(plane_state))) {
+			crtc_state->disable_cxsr = true;
+	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
 			/* FIXME bollocks */
-			pipe_config->update_wm_pre = true;
-			pipe_config->update_wm_post = true;
+			crtc_state->update_wm_pre = true;
+			crtc_state->update_wm_post = true;
 		}
 	}
 
 	if (visible || was_visible)
-		pipe_config->fb_bits |= plane->frontbuffer_bit;
+		crtc_state->fb_bits |= plane->frontbuffer_bit;
 
 	/*
 	 * ILK/SNB DVSACNTR/Sprite Enable
@@ -11458,8 +11591,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
 	     IS_IVYBRIDGE(dev_priv)) &&
 	    (turn_on || (!needs_scaling(old_plane_state) &&
-			 needs_scaling(to_intel_plane_state(plane_state)))))
-		pipe_config->disable_lp_wm = true;
+			 needs_scaling(plane_state))))
+		crtc_state->disable_lp_wm = true;
 
 	return 0;
 }
@@ -11608,7 +11741,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc_state);
 	int ret;
-	bool mode_changed = needs_modeset(crtc_state);
+	bool mode_changed = needs_modeset(pipe_config);
 
 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
 	    mode_changed && !crtc_state->active)
@@ -12090,6 +12223,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	saved_state->scaler_state = crtc_state->scaler_state;
 	saved_state->shared_dpll = crtc_state->shared_dpll;
 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
+	       sizeof(saved_state->icl_port_dplls));
 	saved_state->crc_enabled = crtc_state->crc_enabled;
 	if (IS_G4X(dev_priv) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12706,10 +12841,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void verify_wm_state(struct drm_crtc *crtc,
-			    struct drm_crtc_state *new_state)
+static void verify_wm_state(struct intel_crtc *crtc,
+			    struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct skl_hw_state {
 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
@@ -12719,21 +12854,20 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct skl_ddb_allocation *sw_ddb;
 	struct skl_pipe_wm *sw_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const enum pipe pipe = intel_crtc->pipe;
+	const enum pipe pipe = crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
 
-	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
 		return;
 
 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 	if (!hw)
 		return;
 
-	skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
-	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+	sw_wm = &new_crtc_state->wm.skl.optimal;
 
-	skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
+	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
 
 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -12781,7 +12915,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 
 		/* DDB */
 		hw_ddb_entry = &hw->ddb_y[plane];
-		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
+		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -12833,7 +12967,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 
 		/* DDB */
 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
-		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
+		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12847,23 +12981,22 @@ static void verify_wm_state(struct drm_crtc *crtc,
 }
 
 static void
-verify_connector_state(struct drm_device *dev,
-		       struct drm_atomic_state *state,
-		       struct drm_crtc *crtc)
+verify_connector_state(struct intel_atomic_state *state,
+		       struct intel_crtc *crtc)
 {
 	struct drm_connector *connector;
 	struct drm_connector_state *new_conn_state;
 	int i;
 
-	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
 		struct drm_encoder *encoder = connector->encoder;
-		struct drm_crtc_state *crtc_state = NULL;
+		struct intel_crtc_state *crtc_state = NULL;
 
-		if (new_conn_state->crtc != crtc)
+		if (new_conn_state->crtc != &crtc->base)
 			continue;
 
 		if (crtc)
-			crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 
 		intel_connector_verify_state(crtc_state, new_conn_state);
 
@@ -12873,14 +13006,14 @@ verify_connector_state(struct drm_device *dev,
 }
 
 static void
-verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
 {
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *old_conn_state, *new_conn_state;
 	int i;
 
-	for_each_intel_encoder(dev, encoder) {
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
 		bool enabled = false, found = false;
 		enum pipe pipe;
 
@@ -12888,7 +13021,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
 			      encoder->base.base.id,
 			      encoder->base.name);
 
-		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
 						   new_conn_state, i) {
 			if (old_conn_state->best_encoder == &encoder->base)
 				found = true;
@@ -12922,50 +13055,49 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
 }
 
 static void
-verify_crtc_state(struct drm_crtc *crtc,
-		  struct drm_crtc_state *old_crtc_state,
-		  struct drm_crtc_state *new_crtc_state)
+verify_crtc_state(struct intel_crtc *crtc,
+		  struct intel_crtc_state *old_crtc_state,
+		  struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *pipe_config, *sw_config;
-	struct drm_atomic_state *old_state;
+	struct intel_crtc_state *pipe_config;
+	struct drm_atomic_state *state;
 	bool active;
 
-	old_state = old_crtc_state->state;
-	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
-	pipe_config = to_intel_crtc_state(old_crtc_state);
+	state = old_crtc_state->base.state;
+	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
+	pipe_config = old_crtc_state;
 	memset(pipe_config, 0, sizeof(*pipe_config));
-	pipe_config->base.crtc = crtc;
-	pipe_config->base.state = old_state;
+	pipe_config->base.crtc = &crtc->base;
+	pipe_config->base.state = state;
 
-	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
 
-	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+	active = dev_priv->display.get_pipe_config(crtc, pipe_config);
 
 	/* we keep both pipes enabled on 830 */
 	if (IS_I830(dev_priv))
-		active = new_crtc_state->active;
+		active = new_crtc_state->base.active;
 
-	I915_STATE_WARN(new_crtc_state->active != active,
+	I915_STATE_WARN(new_crtc_state->base.active != active,
 	     "crtc active state doesn't match with hw state "
-	     "(expected %i, found %i)\n", new_crtc_state->active, active);
+	     "(expected %i, found %i)\n", new_crtc_state->base.active, active);
 
-	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+	I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
 	     "transitional active state does not match atomic hw state "
-	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
+	     "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
 		enum pipe pipe;
 
 		active = encoder->get_hw_state(encoder, &pipe);
-		I915_STATE_WARN(active != new_crtc_state->active,
+		I915_STATE_WARN(active != new_crtc_state->base.active,
 			"[ENCODER:%i] active %i with crtc active %i\n",
-			encoder->base.base.id, active, new_crtc_state->active);
+			encoder->base.base.id, active, new_crtc_state->base.active);
 
-		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+		I915_STATE_WARN(active && crtc->pipe != pipe,
 				"Encoder connected to wrong pipe %c\n",
 				pipe_name(pipe));
 
@@ -12975,16 +13107,16 @@ verify_crtc_state(struct drm_crtc *crtc,
 
 	intel_crtc_compute_pixel_rate(pipe_config);
 
-	if (!new_crtc_state->active)
+	if (!new_crtc_state->base.active)
 		return;
 
 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
 
-	sw_config = to_intel_crtc_state(new_crtc_state);
-	if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
+	if (!intel_pipe_config_compare(new_crtc_state,
+				       pipe_config, false)) {
 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
-		intel_dump_pipe_config(sw_config, NULL, "[sw state]");
+		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
 	}
 }
 
@@ -13004,8 +13136,8 @@ intel_verify_planes(struct intel_atomic_state *state)
 static void
 verify_single_dpll_state(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll,
-			 struct drm_crtc *crtc,
-			 struct drm_crtc_state *new_state)
+			 struct intel_crtc *crtc,
+			 struct intel_crtc_state *new_crtc_state)
 {
 	struct intel_dpll_hw_state dpll_hw_state;
 	unsigned int crtc_mask;
@@ -13035,16 +13167,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc_mask = drm_crtc_mask(crtc);
+	crtc_mask = drm_crtc_mask(&crtc->base);
 
-	if (new_state->active)
+	if (new_crtc_state->base.active)
 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
-				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
 	else
 		I915_STATE_WARN(pll->active_mask & crtc_mask,
 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
-				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
 
 	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -13057,51 +13189,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 }
 
 static void
-verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
-			 struct drm_crtc_state *old_crtc_state,
-			 struct drm_crtc_state *new_crtc_state)
+verify_shared_dpll_state(struct intel_crtc *crtc,
+			 struct intel_crtc_state *old_crtc_state,
+			 struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
-	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	if (new_state->shared_dpll)
-		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+	if (new_crtc_state->shared_dpll)
+		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
 
-	if (old_state->shared_dpll &&
-	    old_state->shared_dpll != new_state->shared_dpll) {
-		unsigned int crtc_mask = drm_crtc_mask(crtc);
-		struct intel_shared_dpll *pll = old_state->shared_dpll;
+	if (old_crtc_state->shared_dpll &&
+	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
 
 		I915_STATE_WARN(pll->active_mask & crtc_mask,
 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
-				pipe_name(drm_crtc_index(crtc)));
+				pipe_name(drm_crtc_index(&crtc->base)));
 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
-				pipe_name(drm_crtc_index(crtc)));
+				pipe_name(drm_crtc_index(&crtc->base)));
 	}
 }
 
 static void
-intel_modeset_verify_crtc(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  struct drm_crtc_state *old_state,
-			  struct drm_crtc_state *new_state)
+intel_modeset_verify_crtc(struct intel_crtc *crtc,
+			  struct intel_atomic_state *state,
+			  struct intel_crtc_state *old_crtc_state,
+			  struct intel_crtc_state *new_crtc_state)
 {
-	if (!needs_modeset(new_state) &&
-	    !to_intel_crtc_state(new_state)->update_pipe)
+	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
 		return;
 
-	verify_wm_state(crtc, new_state);
-	verify_connector_state(crtc->dev, state, crtc);
-	verify_crtc_state(crtc, old_state, new_state);
-	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+	verify_wm_state(crtc, new_crtc_state);
+	verify_connector_state(state, crtc);
+	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
 }
 
 static void
-verify_disabled_dpll_state(struct drm_device *dev)
+verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;
 
 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13109,12 +13237,12 @@ verify_disabled_dpll_state(struct drm_device *dev)
 }
 
 static void
-intel_modeset_verify_disabled(struct drm_device *dev,
-			      struct drm_atomic_state *state)
+intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+			      struct intel_atomic_state *state)
 {
-	verify_encoder_state(dev, state);
-	verify_connector_state(dev, state, NULL);
-	verify_disabled_dpll_state(dev);
+	verify_encoder_state(dev_priv, state);
+	verify_connector_state(state, NULL);
+	verify_disabled_dpll_state(dev_priv);
 }
 
 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
@@ -13168,27 +13296,18 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
 	int i;
 
 	if (!dev_priv->display.crtc_compute_clock)
 		return;
 
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    new_crtc_state, i) {
-		struct intel_shared_dpll *old_dpll =
-			old_crtc_state->shared_dpll;
-
-		if (!needs_modeset(&new_crtc_state->base))
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		new_crtc_state->shared_dpll = NULL;
-
-		if (!old_dpll)
-			continue;
-
-		intel_release_shared_dpll(old_dpll, crtc, &state->base);
+		intel_release_shared_dplls(state, crtc);
 	}
 }
 
@@ -13210,7 +13329,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
 	/* look at all crtcs that are going to be enabled during the modeset */
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->base.active ||
-		    !needs_modeset(&crtc_state->base))
+		    !needs_modeset(crtc_state))
 			continue;
 
 		if (first_crtc_state) {
@@ -13235,7 +13354,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
 
 		if (!crtc_state->base.active ||
-		    needs_modeset(&crtc_state->base))
+		    needs_modeset(crtc_state))
 			continue;
 
 		/* 2 or more enabled crtcs means no need for w/a */
@@ -13285,7 +13404,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
-		if (!crtc_state->active || needs_modeset(crtc_state))
+		if (!crtc_state->active || needs_modeset(to_intel_crtc_state(crtc_state)))
 			continue;
 
 		crtc_state->mode_changed = true;
@@ -13362,12 +13481,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
 		}
 
 		if (is_power_of_2(state->active_crtcs)) {
-			struct drm_crtc *crtc;
-			struct drm_crtc_state *crtc_state;
+			struct intel_crtc *crtc;
+			struct intel_crtc_state *crtc_state;
 
 			pipe = ilog2(state->active_crtcs);
-			crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
-			crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
+			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 			if (crtc_state && needs_modeset(crtc_state))
 				pipe = INVALID_PIPE;
 		} else {
@@ -13478,7 +13597,7 @@ static int intel_atomic_check(struct drm_device *dev,
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(&new_crtc_state->base))
+		if (!needs_modeset(new_crtc_state))
 			continue;
 
 		if (!new_crtc_state->base.enable) {
@@ -13492,7 +13611,7 @@ static int intel_atomic_check(struct drm_device *dev,
 
 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
 
-		if (needs_modeset(&new_crtc_state->base))
+		if (needs_modeset(new_crtc_state))
 			any_ms = true;
 	}
 
@@ -13527,12 +13646,12 @@ static int intel_atomic_check(struct drm_device *dev,
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (!needs_modeset(&new_crtc_state->base) &&
+		if (!needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->update_pipe)
 			continue;
 
 		intel_dump_pipe_config(new_crtc_state, state,
-				       needs_modeset(&new_crtc_state->base) ?
+				       needs_modeset(new_crtc_state) ?
 				       "[modeset]" : "[fastset]");
 	}
 
@@ -13567,60 +13686,57 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
 	if (!vblank->max_vblank_count)
 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
 
-	return dev->driver->get_vblank_counter(dev, crtc->pipe);
+	return crtc->base.funcs->get_vblank_counter(&crtc->base);
 }
 
-static void intel_update_crtc(struct drm_crtc *crtc,
-			      struct drm_atomic_state *state,
-			      struct drm_crtc_state *old_crtc_state,
-			      struct drm_crtc_state *new_crtc_state)
+static void intel_update_crtc(struct intel_crtc *crtc,
+			      struct intel_atomic_state *state,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = state->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
 	bool modeset = needs_modeset(new_crtc_state);
 	struct intel_plane_state *new_plane_state =
-		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
-						 to_intel_plane(crtc->primary));
+		intel_atomic_get_new_plane_state(state,
+						 to_intel_plane(crtc->base.primary));
 
 	if (modeset) {
-		update_scanline_offset(pipe_config);
-		dev_priv->display.crtc_enable(pipe_config, state);
+		update_scanline_offset(new_crtc_state);
+		dev_priv->display.crtc_enable(new_crtc_state, state);
 
 		/* vblanks work again, re-enable pipe CRC. */
-		intel_crtc_enable_pipe_crc(intel_crtc);
+		intel_crtc_enable_pipe_crc(crtc);
 	} else {
-		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
-				       pipe_config);
+		intel_pre_plane_update(old_crtc_state, new_crtc_state);
 
-		if (pipe_config->update_pipe)
-			intel_encoders_update_pipe(crtc, pipe_config, state);
+		if (new_crtc_state->update_pipe)
+			intel_encoders_update_pipe(crtc, new_crtc_state, state);
 	}
 
-	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
-		intel_fbc_disable(intel_crtc);
+	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+		intel_fbc_disable(crtc);
 	else if (new_plane_state)
-		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
+		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
 
-	intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+	intel_begin_crtc_commit(state, crtc);
 
 	if (INTEL_GEN(dev_priv) >= 9)
-		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+		skl_update_planes_on_crtc(state, crtc);
 	else
-		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+		i9xx_update_planes_on_crtc(state, crtc);
 
-	intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+	intel_finish_crtc_commit(state, crtc);
 }
 
-static void intel_update_crtcs(struct drm_atomic_state *state)
+static void intel_update_crtcs(struct intel_atomic_state *state)
 {
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		if (!new_crtc_state->active)
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (!new_crtc_state->base.active)
 			continue;
 
 		intel_update_crtc(crtc, state, old_crtc_state,
@@ -13628,26 +13744,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state)
 	}
 }
 
-static void skl_update_crtcs(struct drm_atomic_state *state)
+static void skl_update_crtcs(struct intel_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
-	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-	struct intel_crtc_state *cstate;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
 	unsigned int updated = 0;
 	bool progress;
 	enum pipe pipe;
 	int i;
 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
+	u8 required_slices = state->wm_results.ddb.enabled_slices;
 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
 		/* ignore allocations for crtc's that have been turned off. */
-		if (new_crtc_state->active)
-			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+		if (new_crtc_state->base.active)
+			entries[i] = old_crtc_state->wm.skl.ddb;
 
 	/* If 2nd DBuf slice required, enable it here */
 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -13662,24 +13775,22 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 	do {
 		progress = false;
 
-		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 			bool vbl_wait = false;
-			unsigned int cmask = drm_crtc_mask(crtc);
+			unsigned int cmask = drm_crtc_mask(&crtc->base);
 
-			intel_crtc = to_intel_crtc(crtc);
-			cstate = to_intel_crtc_state(new_crtc_state);
-			pipe = intel_crtc->pipe;
+			pipe = crtc->pipe;
 
-			if (updated & cmask || !cstate->base.active)
+			if (updated & cmask || !new_crtc_state->base.active)
 				continue;
 
-			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
+			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
 							entries,
 							INTEL_INFO(dev_priv)->num_pipes, i))
 				continue;
 
 			updated |= cmask;
-			entries[i] = cstate->wm.skl.ddb;
+			entries[i] = new_crtc_state->wm.skl.ddb;
 
 			/*
 			 * If this is an already active pipe, its DDB changed,
@@ -13687,10 +13798,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 			 * then we need to wait for a vblank to pass for the
 			 * new ddb allocation to take effect.
 			 */
-			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
-						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
-			    !new_crtc_state->active_changed &&
-			    intel_state->wm_results.dirty_pipes != updated)
+			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+						 &old_crtc_state->wm.skl.ddb) &&
+			    !new_crtc_state->base.active_changed &&
+			    state->wm_results.dirty_pipes != updated)
 				vbl_wait = true;
 
 			intel_update_crtc(crtc, state, old_crtc_state,
@@ -13763,57 +13874,50 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
 	intel_atomic_helper_free_state(i915);
 }
 
-static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_device *dev = state->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
-	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
-	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+	struct intel_crtc *crtc;
 	u64 put_domains[I915_MAX_PIPES] = {};
 	intel_wakeref_t wakeref = 0;
 	int i;
 
-	intel_atomic_commit_fence_wait(intel_state);
+	intel_atomic_commit_fence_wait(state);
 
-	drm_atomic_helper_wait_for_dependencies(state);
+	drm_atomic_helper_wait_for_dependencies(&state->base);
 
-	if (intel_state->modeset)
+	if (state->modeset)
 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
-		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-		intel_crtc = to_intel_crtc(crtc);
-
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		if (needs_modeset(new_crtc_state) ||
-		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
+		    new_crtc_state->update_pipe) {
 
-			put_domains[intel_crtc->pipe] =
+			put_domains[crtc->pipe] =
 				modeset_get_crtc_power_domains(crtc,
-					new_intel_crtc_state);
+					new_crtc_state);
 		}
 
 		if (!needs_modeset(new_crtc_state))
 			continue;
 
-		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
+		intel_pre_plane_update(old_crtc_state, new_crtc_state);
 
-		if (old_crtc_state->active) {
-			intel_crtc_disable_planes(intel_state, intel_crtc);
+		if (old_crtc_state->base.active) {
+			intel_crtc_disable_planes(state, crtc);
 
 			/*
 			 * We need to disable pipe CRC before disabling the pipe,
 			 * or we race against vblank off.
 			 */
-			intel_crtc_disable_pipe_crc(intel_crtc);
+			intel_crtc_disable_pipe_crc(crtc);
 
-			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
-			intel_crtc->active = false;
-			intel_fbc_disable(intel_crtc);
-			intel_disable_shared_dpll(old_intel_crtc_state);
+			dev_priv->display.crtc_disable(old_crtc_state, state);
+			crtc->active = false;
+			intel_fbc_disable(crtc);
+			intel_disable_shared_dpll(old_crtc_state);
 
 			/*
 			 * Underruns don't always raise
@@ -13823,25 +13927,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			intel_check_pch_fifo_underruns(dev_priv);
 
 			/* FIXME unify this for all platforms */
-			if (!new_crtc_state->active &&
+			if (!new_crtc_state->base.active &&
 			    !HAS_GMCH(dev_priv) &&
 			    dev_priv->display.initial_watermarks)
-				dev_priv->display.initial_watermarks(intel_state,
-								     new_intel_crtc_state);
+				dev_priv->display.initial_watermarks(state,
+								     new_crtc_state);
 		}
 	}
 
-	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
-	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
+	/* FIXME: Eventually get rid of our crtc->config pointer */
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+		crtc->config = new_crtc_state;
 
-	if (intel_state->modeset) {
-		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+	if (state->modeset) {
+		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
 
 		intel_set_cdclk_pre_plane_update(dev_priv,
-						 &intel_state->cdclk.actual,
+						 &state->cdclk.actual,
 						 &dev_priv->cdclk.actual,
-						 intel_state->cdclk.pipe);
+						 state->cdclk.pipe);
 
 		/*
 		 * SKL workaround: bspec recommends we disable the SAGV when we
@@ -13850,31 +13954,37 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!intel_can_enable_sagv(state))
 			intel_disable_sagv(dev_priv);
 
-		intel_modeset_verify_disabled(dev, state);
+		intel_modeset_verify_disabled(dev_priv, state);
 	}
 
 	/* Complete the events for pipes that have now been disabled */
-	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		bool modeset = needs_modeset(new_crtc_state);
 
 		/* Complete events for now disabled pipes here. */
-		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
+		if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
 			spin_lock_irq(&dev->event_lock);
-			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+			drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
 			spin_unlock_irq(&dev->event_lock);
 
-			new_crtc_state->event = NULL;
+			new_crtc_state->base.event = NULL;
 		}
 	}
 
+	if (state->modeset)
+		intel_encoders_update_prepare(state);
+
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	dev_priv->display.update_crtcs(state);
 
-	if (intel_state->modeset)
+	if (state->modeset) {
+		intel_encoders_update_complete(state);
+
 		intel_set_cdclk_post_plane_update(dev_priv,
-						  &intel_state->cdclk.actual,
+						  &state->cdclk.actual,
 						  &dev_priv->cdclk.actual,
-						  intel_state->cdclk.pipe);
+						  state->cdclk.pipe);
+	}
 
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 	 * already, but still need the state for the delayed optimization. To
@@ -13885,16 +13995,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * - switch over to the vblank wait helper in the core after that since
 	 *   we don't need our special handling any more.
 	 */
-	drm_atomic_helper_wait_for_flip_done(dev, state);
+	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
 
-	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
-		if (new_crtc_state->active &&
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		if (new_crtc_state->base.active &&
 		    !needs_modeset(new_crtc_state) &&
-		    (new_intel_crtc_state->base.color_mgmt_changed ||
-		     new_intel_crtc_state->update_pipe))
-			intel_color_load_luts(new_intel_crtc_state);
+		    (new_crtc_state->base.color_mgmt_changed ||
+		     new_crtc_state->update_pipe))
+			intel_color_load_luts(new_crtc_state);
 	}
 
 	/*
@@ -13904,16 +14012,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 *
 	 * TODO: Move this (and other cleanup) to an async worker eventually.
 	 */
-	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (dev_priv->display.optimize_watermarks)
-			dev_priv->display.optimize_watermarks(intel_state,
-							      new_intel_crtc_state);
+			dev_priv->display.optimize_watermarks(state,
+							      new_crtc_state);
 	}
 
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		intel_post_plane_update(old_crtc_state);
 
 		if (put_domains[i])
 			modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -13921,15 +14027,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
 	}
 
-	if (intel_state->modeset)
-		intel_verify_planes(intel_state);
+	if (state->modeset)
+		intel_verify_planes(state);
 
-	if (intel_state->modeset && intel_can_enable_sagv(state))
+	if (state->modeset && intel_can_enable_sagv(state))
 		intel_enable_sagv(dev_priv);
 
-	drm_atomic_helper_commit_hw_done(state);
+	drm_atomic_helper_commit_hw_done(&state->base);
 
-	if (intel_state->modeset) {
+	if (state->modeset) {
 		/* As one of the primary mmio accessors, KMS has a high
 		 * likelihood of triggering bugs in unclaimed access. After we
 		 * finish modesetting, see if an error has been flagged, and if
@@ -13939,7 +14045,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
 	}
-	intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 
 	/*
 	 * Defer the cleanup of the old state to a separate worker to not
@@ -13949,14 +14055,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * schedule point (cond_resched()) here anyway to keep latencies
 	 * down.
 	 */
-	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
-	queue_work(system_highpri_wq, &state->commit_work);
+	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+	queue_work(system_highpri_wq, &state->base.commit_work);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
 {
-	struct drm_atomic_state *state =
-		container_of(work, struct drm_atomic_state, commit_work);
+	struct intel_atomic_state *state =
+		container_of(work, struct intel_atomic_state, base.commit_work);
 
 	intel_atomic_commit_tail(state);
 }
@@ -14099,24 +14205,12 @@ static int intel_atomic_commit(struct drm_device *dev,
 	} else {
 		if (intel_state->modeset)
 			flush_workqueue(dev_priv->modeset_wq);
-		intel_atomic_commit_tail(state);
+		intel_atomic_commit_tail(intel_state);
 	}
 
 	return 0;
 }
 
-static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
-	.set_config = drm_atomic_helper_set_config,
-	.destroy = intel_crtc_destroy,
-	.page_flip = drm_atomic_helper_page_flip,
-	.atomic_duplicate_state = intel_crtc_duplicate_state,
-	.atomic_destroy_state = intel_crtc_destroy_state,
-	.set_crc_source = intel_crtc_set_crc_source,
-	.verify_crc_source = intel_crtc_verify_crc_source,
-	.get_crc_sources = intel_crtc_get_crc_sources,
-};
-
 struct wait_rps_boost {
 	struct wait_queue_entry wait;
 
@@ -14250,9 +14344,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	int ret;
 
 	if (old_obj) {
-		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_new_crtc_state(new_state->state,
-						      plane->state->crtc);
+		struct intel_crtc_state *crtc_state =
+			intel_atomic_get_new_crtc_state(intel_state,
+							to_intel_crtc(plane->state->crtc));
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14413,7 +14507,7 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
 		intel_atomic_get_old_crtc_state(state, crtc);
 	struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	bool modeset = needs_modeset(&new_crtc_state->base);
+	bool modeset = needs_modeset(new_crtc_state);
 
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(new_crtc_state);
@@ -14466,7 +14560,7 @@ static void intel_finish_crtc_commit(struct intel_atomic_state *state,
 	intel_pipe_update_end(new_crtc_state);
 
 	if (new_crtc_state->update_pipe &&
-	    !needs_modeset(&new_crtc_state->base) &&
+	    !needs_modeset(new_crtc_state) &&
 	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
 }
@@ -14580,7 +14674,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	 * When crtc is inactive or there is a modeset pending,
 	 * wait for it to complete in the slowpath
 	 */
-	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
 	    crtc_state->update_pipe)
 		goto slow;
 
@@ -14910,8 +15004,76 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 	scaler_state->scaler_id = -1;
 }
 
+#define INTEL_CRTC_FUNCS \
+	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
+	.set_config = drm_atomic_helper_set_config, \
+	.destroy = intel_crtc_destroy, \
+	.page_flip = drm_atomic_helper_page_flip, \
+	.atomic_duplicate_state = intel_crtc_duplicate_state, \
+	.atomic_destroy_state = intel_crtc_destroy_state, \
+	.set_crc_source = intel_crtc_set_crc_source, \
+	.verify_crc_source = intel_crtc_verify_crc_source, \
+	.get_crc_sources = intel_crtc_get_crc_sources
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = bdw_enable_vblank,
+	.disable_vblank = bdw_disable_vblank,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = ilk_enable_vblank,
+	.disable_vblank = ilk_disable_vblank,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = g4x_get_vblank_counter,
+	.enable_vblank = i965_enable_vblank,
+	.disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i965_enable_vblank,
+	.disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i945gm_enable_vblank,
+	.disable_vblank = i945gm_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i8xx_enable_vblank,
+	.disable_vblank = i8xx_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+	INTEL_CRTC_FUNCS,
+
+	/* no hw vblank counter */
+	.enable_vblank = i8xx_enable_vblank,
+	.disable_vblank = i8xx_disable_vblank,
+};
+
 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+	const struct drm_crtc_funcs *funcs;
 	struct intel_crtc *intel_crtc;
 	struct intel_crtc_state *crtc_state = NULL;
 	struct intel_plane *primary = NULL;
@@ -14955,10 +15117,28 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 	}
 	intel_crtc->plane_ids_mask |= BIT(cursor->id);
 
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv) ||
+		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+			funcs = &g4x_crtc_funcs;
+		else if (IS_GEN(dev_priv, 4))
+			funcs = &i965_crtc_funcs;
+		else if (IS_I945GM(dev_priv))
+			funcs = &i945gm_crtc_funcs;
+		else if (IS_GEN(dev_priv, 3))
+			funcs = &i915_crtc_funcs;
+		else
+			funcs = &i8xx_crtc_funcs;
+	} else {
+		if (INTEL_GEN(dev_priv) >= 8)
+			funcs = &bdw_crtc_funcs;
+		else
+			funcs = &ilk_crtc_funcs;
+	}
+
 	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
 					&primary->base, &cursor->base,
-					&intel_crtc_funcs,
-					"pipe %c", pipe_name(pipe));
+					funcs, "pipe %c", pipe_name(pipe));
 	if (ret)
 		goto fail;
 
@@ -15775,8 +15955,8 @@ static void sanitize_watermarks(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *state;
 	struct intel_atomic_state *intel_state;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *cstate;
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *crtc_state;
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 	int i;
@@ -15831,13 +16011,11 @@ static void sanitize_watermarks(struct drm_device *dev)
 	}
 
 	/* Write calculated watermark values back */
-	for_each_new_crtc_in_state(state, crtc, cstate, i) {
-		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+		crtc_state->wm.need_postvbl_update = true;
+		dev_priv->display.optimize_watermarks(intel_state, crtc_state);
 
-		cs->wm.need_postvbl_update = true;
-		dev_priv->display.optimize_watermarks(intel_state, cs);
-
-		to_intel_crtc_state(crtc->state)->wm = cs->wm;
+		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
 	}
 
 put_state:
@@ -16495,6 +16673,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
 		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
 							&pll->state.hw_state);
+
+		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
+		    pll->info->id == DPLL_ID_EHL_DPLL4) {
+			pll->wakeref = intel_display_power_get(dev_priv,
+							       POWER_DOMAIN_DPLL_DC_OFF);
+		}
+
 		pll->state.crtc_mask = 0;
 		for_each_intel_crtc(dev, crtc) {
 			struct intel_crtc_state *crtc_state =
@@ -16744,6 +16929,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	intel_modeset_readout_hw_state(dev);
 
 	/* HW state is read out, now we need to sanitize this mess. */
+
+	/* Sanitize the TypeC port mode upfront, encoders depend on this */
+	for_each_intel_encoder(dev, encoder) {
+		/* We need to sanitize only the MST primary port. */
+		if (encoder->type != INTEL_OUTPUT_DP_MST &&
+		    intel_port_is_tc(dev_priv, encoder->port))
+			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+	}
+
 	get_encoder_power_domains(dev_priv);
 
 	if (HAS_PCH_IBX(dev_priv))
@@ -16804,7 +16998,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		u64 put_domains;
 
 		crtc_state = to_intel_crtc_state(crtc->base.state);
-		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
+		put_domains = modeset_get_crtc_power_domains(crtc, crtc_state);
 		if (WARN_ON(put_domains))
 			modeset_put_power_domains(dev_priv, put_domains);
 	}
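
The hardware readout hunk above takes the new POWER_DOMAIN_DPLL_DC_OFF wakeref when it finds EHL DPLL4 already enabled, so the reference stays balanced against the put done when the PLL is later disabled (see the combo_pll_enable()/combo_pll_disable() hunks in intel_dpll_mgr.c further down). A minimal sketch of that pairing; ehl_pll_needs_dc_off() is a hypothetical stand-in for the IS_ELKHARTLAKE()/DPLL_ID_EHL_DPLL4 checks the patch open-codes:

/* Illustrative only -- the real code open-codes this in the enable,
 * disable and readout paths rather than sharing a helper.
 */
static void ehl_dpll4_update_dc_off(struct drm_i915_private *i915,
				    struct intel_shared_dpll *pll,
				    bool enable)
{
	if (!ehl_pll_needs_dc_off(i915, pll))	/* hypothetical helper */
		return;

	if (enable)
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	else
		intel_display_power_put(i915, POWER_DOMAIN_DPLL_DC_OFF,
					pll->wakeref);
}
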
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index ee6b819..d296556 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -189,10 +189,9 @@ enum tc_port {
 	I915_MAX_TC_PORTS
 };
 
-enum tc_port_type {
-	TC_PORT_UNKNOWN = 0,
-	TC_PORT_TYPEC,
-	TC_PORT_TBT,
+enum tc_port_mode {
+	TC_PORT_TBT_ALT,
+	TC_PORT_DP_ALT,
 	TC_PORT_LEGACY,
 };
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c93ad51..7437fc7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -17,6 +17,7 @@
 #include "intel_drv.h"
 #include "intel_hotplug.h"
 #include "intel_sideband.h"
+#include "intel_tc.h"
 
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 					 enum i915_power_well_id power_well_id);
@@ -117,6 +118,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 		return "MODESET";
 	case POWER_DOMAIN_GT_IRQ:
 		return "GT_IRQ";
+	case POWER_DOMAIN_DPLL_DC_OFF:
+		return "DPLL_DC_OFF";
 	default:
 		MISSING_CASE(domain);
 		return "?";
@@ -269,11 +272,17 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 	int pw_idx = power_well->desc->hsw.idx;
 
 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
-	WARN_ON(intel_wait_for_register(&dev_priv->uncore,
-					regs->driver,
-					HSW_PWR_WELL_CTL_STATE(pw_idx),
-					HSW_PWR_WELL_CTL_STATE(pw_idx),
-					1));
+	if (intel_wait_for_register(&dev_priv->uncore,
+				    regs->driver,
+				    HSW_PWR_WELL_CTL_STATE(pw_idx),
+				    HSW_PWR_WELL_CTL_STATE(pw_idx),
+				    1)) {
+		DRM_DEBUG_KMS("%s power well enable timeout\n",
+			      power_well->desc->name);
+
+		/* An AUX timeout is expected if the TBT DP tunnel is down. */
+		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+	}
 }
 
 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
@@ -438,13 +447,91 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 #define ICL_AUX_PW_TO_CH(pw_idx)	\
 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	int pw_idx = power_well->desc->hsw.idx;
+
+	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+						 ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
+
+static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
+				      struct i915_power_well *power_well)
+{
+	int refs = hweight64(power_well->desc->domains &
+			     async_put_domains_mask(&dev_priv->power_domains));
+
+	WARN_ON(refs > power_well->count);
+
+	return refs;
+}
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+	struct intel_digital_port *dig_port = NULL;
+	struct intel_encoder *encoder;
+
+	/* Bypass the check if all references are released asynchronously */
+	if (power_well_async_ref_count(dev_priv, power_well) ==
+	    power_well->count)
+		return;
+
+	aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		if (!intel_port_is_tc(dev_priv, encoder->port))
+			continue;
+
+		/* We'll check the MST primary port */
+		if (encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		dig_port = enc_to_dig_port(&encoder->base);
+		if (WARN_ON(!dig_port))
+			continue;
+
+		if (dig_port->aux_ch != aux_ch) {
+			dig_port = NULL;
+			continue;
+		}
+
+		break;
+	}
+
+	if (WARN_ON(!dig_port))
+		return;
+
+	WARN_ON(!intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+}
+
+#endif
+
 static void
 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 				 struct i915_power_well *power_well)
 {
-	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
 	u32 val;
 
+	icl_tc_port_assert_ref_held(dev_priv, power_well);
+
 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
 	val &= ~DP_AUX_CH_CTL_TBT_IO;
 	if (power_well->desc->hsw.is_tc_tbt)
@@ -454,6 +541,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	hsw_power_well_enable(dev_priv, power_well);
 }
 
+static void
+icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+				  struct i915_power_well *power_well)
+{
+	icl_tc_port_assert_ref_held(dev_priv, power_well);
+
+	hsw_power_well_disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -1064,7 +1160,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* make sure we're done processing display irqs */
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 
 	intel_power_sequencer_reset(dev_priv);
 
@@ -2361,6 +2457,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	ICL_PW_2_POWER_DOMAINS |			\
 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
+	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
 	BIT_ULL(POWER_DOMAIN_INIT))
 
 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
@@ -3106,7 +3203,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
 	.sync_hw = hsw_power_well_sync_hw,
 	.enable = icl_tc_phy_aux_power_well_enable,
-	.disable = hsw_power_well_disable,
+	.disable = icl_tc_phy_aux_power_well_disable,
 	.is_enabled = hsw_power_well_enabled,
 };
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index ff57b0a..8f43f70 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -59,6 +59,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_GMBUS,
 	POWER_DOMAIN_MODESET,
 	POWER_DOMAIN_GT_IRQ,
+	POWER_DOMAIN_DPLL_DC_OFF,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 4336df4..0bdb7ec 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -62,6 +62,7 @@
 #include "intel_panel.h"
 #include "intel_psr.h"
 #include "intel_sideband.h"
+#include "intel_tc.h"
 #include "intel_vdsc.h"
 
 #define DP_DPRX_ESI_LEN 14
@@ -211,46 +212,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
 	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
 }
 
-static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	intel_wakeref_t wakeref;
-	u32 lane_info;
-
-	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
-		return 4;
-
-	lane_info = 0;
-	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
-		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
-			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
-				DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
-	switch (lane_info) {
-	default:
-		MISSING_CASE(lane_info);
-	case 1:
-	case 2:
-	case 4:
-	case 8:
-		return 1;
-	case 3:
-	case 12:
-		return 2;
-	case 15:
-		return 4;
-	}
-}
-
 /* Theoretical max between source and sink */
 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	int source_max = intel_dig_port->max_lanes;
 	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
-	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
+	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
 
 	return min3(source_max, sink_max, fia_max);
 }
@@ -1208,7 +1176,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
 	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
-	if (intel_dig_port->tc_type == TC_PORT_TBT)
+	if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
 		ret |= DP_AUX_CH_CTL_TBT_IO;
 
 	return ret;
@@ -1224,6 +1192,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	struct drm_i915_private *i915 =
 			to_i915(intel_dig_port->base.base.dev);
 	struct intel_uncore *uncore = &i915->uncore;
+	bool is_tc_port = intel_port_is_tc(i915, intel_dig_port->base.port);
 	i915_reg_t ch_ctl, ch_data[5];
 	u32 aux_clock_divider;
 	enum intel_display_power_domain aux_domain =
@@ -1239,6 +1208,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
 		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
 
+	if (is_tc_port)
+		intel_tc_port_lock(intel_dig_port);
+
 	aux_wakeref = intel_display_power_get(i915, aux_domain);
 	pps_wakeref = pps_lock(intel_dp);
 
@@ -1391,6 +1363,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	pps_unlock(intel_dp, pps_wakeref);
 	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
 
+	if (is_tc_port)
+		intel_tc_port_unlock(intel_dig_port);
+
 	return ret;
 }
 
@@ -5232,195 +5207,6 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
 	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
 }
 
-static const char *tc_type_name(enum tc_port_type type)
-{
-	static const char * const names[] = {
-		[TC_PORT_UNKNOWN] = "unknown",
-		[TC_PORT_LEGACY] = "legacy",
-		[TC_PORT_TYPEC] = "typec",
-		[TC_PORT_TBT] = "tbt",
-	};
-
-	if (WARN_ON(type >= ARRAY_SIZE(names)))
-		type = TC_PORT_UNKNOWN;
-
-	return names[type];
-}
-
-static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
-				    struct intel_digital_port *intel_dig_port,
-				    bool is_legacy, bool is_typec, bool is_tbt)
-{
-	enum port port = intel_dig_port->base.port;
-	enum tc_port_type old_type = intel_dig_port->tc_type;
-
-	WARN_ON(is_legacy + is_typec + is_tbt != 1);
-
-	if (is_legacy)
-		intel_dig_port->tc_type = TC_PORT_LEGACY;
-	else if (is_typec)
-		intel_dig_port->tc_type = TC_PORT_TYPEC;
-	else if (is_tbt)
-		intel_dig_port->tc_type = TC_PORT_TBT;
-	else
-		return;
-
-	/* Types are not supposed to be changed at runtime. */
-	WARN_ON(old_type != TC_PORT_UNKNOWN &&
-		old_type != intel_dig_port->tc_type);
-
-	if (old_type != intel_dig_port->tc_type)
-		DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
-			      tc_type_name(intel_dig_port->tc_type));
-}
-
-/*
- * This function implements the first part of the Connect Flow described by our
- * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
- * lanes, EDID, etc) is done as needed in the typical places.
- *
- * Unlike the other ports, type-C ports are not available to use as soon as we
- * get a hotplug. The type-C PHYs can be shared between multiple controllers:
- * display, USB, etc. As a result, handshaking through FIA is required around
- * connect and disconnect to cleanly transfer ownership with the controller and
- * set the type-C power state.
- *
- * We could opt to only do the connect flow when we actually try to use the AUX
- * channels or do a modeset, then immediately run the disconnect flow after
- * usage, but there are some implications on this for a dynamic environment:
- * things may go away or change behind our backs. So for now our driver is
- * always trying to acquire ownership of the controller as soon as it gets an
- * interrupt (or polls state and sees a port is connected) and only gives it
- * back when it sees a disconnect. Implementation of a more fine-grained model
- * will require a lot of coordination with user space and thorough testing for
- * the extra possible cases.
- */
-static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
-			       struct intel_digital_port *dig_port)
-{
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-	u32 val;
-
-	if (dig_port->tc_type != TC_PORT_LEGACY &&
-	    dig_port->tc_type != TC_PORT_TYPEC)
-		return true;
-
-	val = I915_READ(PORT_TX_DFLEXDPPMS);
-	if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
-		DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
-		WARN_ON(dig_port->tc_legacy_port);
-		return false;
-	}
-
-	/*
-	 * This function may be called many times in a row without an HPD event
-	 * in between, so try to avoid the write when we can.
-	 */
-	val = I915_READ(PORT_TX_DFLEXDPCSSS);
-	if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
-		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
-		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
-	}
-
-	/*
-	 * Now we have to re-check the live state, in case the port recently
-	 * became disconnected. Not necessary for legacy mode.
-	 */
-	if (dig_port->tc_type == TC_PORT_TYPEC &&
-	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
-		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
-		icl_tc_phy_disconnect(dev_priv, dig_port);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * See the comment at the connect function. This implements the Disconnect
- * Flow.
- */
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-			   struct intel_digital_port *dig_port)
-{
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-
-	if (dig_port->tc_type == TC_PORT_UNKNOWN)
-		return;
-
-	/*
-	 * TBT disconnection flow is read the live status, what was done in
-	 * caller.
-	 */
-	if (dig_port->tc_type == TC_PORT_TYPEC ||
-	    dig_port->tc_type == TC_PORT_LEGACY) {
-		u32 val;
-
-		val = I915_READ(PORT_TX_DFLEXDPCSSS);
-		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
-		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
-	}
-
-	DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
-		      port_name(dig_port->base.port),
-		      tc_type_name(dig_port->tc_type));
-
-	dig_port->tc_type = TC_PORT_UNKNOWN;
-}
-
-/*
- * The type-C ports are different because even when they are connected, they may
- * not be available/usable by the graphics driver: see the comment on
- * icl_tc_phy_connect(). So in our driver instead of adding the additional
- * concept of "usable" and make everything check for "connected and usable" we
- * define a port as "connected" when it is not only connected, but also when it
- * is usable by the rest of the driver. That maintains the old assumption that
- * connected ports are usable, and avoids exposing to the users objects they
- * can't really use.
- */
-static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
-				  struct intel_digital_port *intel_dig_port)
-{
-	enum port port = intel_dig_port->base.port;
-	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
-	bool is_legacy, is_typec, is_tbt;
-	u32 dpsp;
-
-	/*
-	 * Complain if we got a legacy port HPD, but VBT didn't mark the port as
-	 * legacy. Treat the port as legacy from now on.
-	 */
-	if (!intel_dig_port->tc_legacy_port &&
-	    I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
-		DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
-			  port_name(port));
-		intel_dig_port->tc_legacy_port = true;
-	}
-	is_legacy = intel_dig_port->tc_legacy_port;
-
-	/*
-	 * The spec says we shouldn't be using the ISR bits for detecting
-	 * between TC and TBT. We should use DFLEXDPSP.
-	 */
-	dpsp = I915_READ(PORT_TX_DFLEXDPSP);
-	is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
-	is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
-
-	if (!is_legacy && !is_typec && !is_tbt) {
-		icl_tc_phy_disconnect(dev_priv, intel_dig_port);
-
-		return false;
-	}
-
-	icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
-				is_tbt);
-
-	if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
-		return false;
-
-	return true;
-}
-
 static bool icl_digital_port_connected(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -5429,7 +5215,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
 	if (intel_port_is_combophy(dev_priv, encoder->port))
 		return icl_combo_port_connected(dev_priv, dig_port);
 	else if (intel_port_is_tc(dev_priv, encoder->port))
-		return icl_tc_port_connected(dev_priv, dig_port);
+		return intel_tc_port_connected(dig_port);
 	else
 		MISSING_CASE(encoder->hpd_pin);
 
@@ -6834,8 +6620,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 				    const struct intel_crtc_state *crtc_state,
 				    int refresh_rate)
 {
-	struct intel_encoder *encoder;
-	struct intel_digital_port *dig_port = NULL;
 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
@@ -6850,9 +6634,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	dig_port = dp_to_dig_port(intel_dp);
-	encoder = &dig_port->base;
-
 	if (!intel_crtc) {
 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
 		return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index da70b1a..657bbb1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
-			   struct intel_digital_port *dig_port);
 
 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7ded95a..6b0b734 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 {
 	struct intel_panel *panel = &intel_connector->panel;
+	struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
 
-	if (!i915_modparams.enable_dpcd_backlight)
+	if (i915_modparams.enable_dpcd_backlight == 0 ||
+	    (i915_modparams.enable_dpcd_backlight == -1 &&
+	    dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
 		return -ENODEV;
 
 	if (!intel_dp_aux_display_control_capable(intel_connector))
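
The check above turns enable_dpcd_backlight into a tri-state: 0 disables AUX/DPCD backlight control, 1 forces it on, and -1 (presumably the new default, set elsewhere in the series) only uses it when the VBT describes the panel as a VESA eDP AUX backlight. A minimal sketch of that decision; the helper name is illustrative only:

/* Illustrative helper, not part of the patch. */
static bool want_dpcd_backlight(int modparam,
				enum intel_backlight_type vbt_type)
{
	if (modparam == 0)	/* explicitly disabled */
		return false;
	if (modparam == -1)	/* auto: trust the VBT panel description */
		return vbt_type == INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE;

	return true;		/* explicitly enabled */
}
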
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 1470c6e..6754c21 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,9 +6,15 @@
 #ifndef __INTEL_DP_MST_H__
 #define __INTEL_DP_MST_H__
 
-struct intel_digital_port;
+#include "intel_drv.h"
 
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+static inline int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+	return intel_dig_port->dp.active_mst_links;
+}
+
 
 #endif /* __INTEL_DP_MST_H__ */
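
The new inline helper exposes the primary port's active MST link count to code outside intel_dp_mst.c; presumably the Type-C port code added in this series uses it to tell whether a port is still driving MST streams. A hypothetical caller sketch (the function name is illustrative only):

/* Hypothetical caller, not from this patch. */
static bool example_tc_port_has_active_mst_links(struct intel_digital_port *dig_port)
{
	return intel_dp_mst_encoder_active_links(dig_port) > 0;
}
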
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d4e7b9..30d7500 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -36,9 +36,10 @@
  * This file provides an abstraction over display PLLs. The function
  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
  * users of a PLL are tracked and that tracking is integrated with the atomic
- * modest interface. During an atomic operation, a PLL can be requested for a
- * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
- * a previously used PLL can be released with intel_release_shared_dpll().
+ * modeset interface. During an atomic operation, required PLLs can be reserved
+ * for a given CRTC and encoder configuration by calling
+ * intel_reserve_shared_dplls() and previously reserved PLLs can be released
+ * with intel_release_shared_dplls().
  * Changes to the users are first staged in the atomic state, and then made
  * effective by calling intel_shared_dpll_swap_state() during the atomic
  * commit phase.
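
A compressed caller-side view of the lifecycle described in the updated DOC comment, assuming the reservation happens during the atomic check phase; the wrapper name is illustrative, only the intel_*_shared_dplls() calls come from this patch:

/* Illustrative sketch of the reserve -> swap -> release flow. */
static int example_crtc_compute_clocks(struct intel_atomic_state *state,
				       struct intel_crtc *crtc,
				       struct intel_encoder *encoder)
{
	/* check phase: stage the DPLL reservation in the atomic state */
	if (!intel_reserve_shared_dplls(state, crtc, encoder))
		return -EINVAL;

	/*
	 * commit phase: intel_shared_dpll_swap_state() makes the staged
	 * reservation effective; a later modeset that drops the CRTC calls
	 * intel_release_shared_dplls(state, crtc) to release it again.
	 */
	return 0;
}
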
@@ -243,17 +244,18 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 }
 
 static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_atomic_state *state,
+		       const struct intel_crtc *crtc,
+		       const struct intel_dpll_hw_state *pll_state,
 		       enum intel_dpll_id range_min,
 		       enum intel_dpll_id range_max)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll, *unused_pll = NULL;
 	struct intel_shared_dpll_state *shared_dpll;
 	enum intel_dpll_id i;
 
-	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 
 	for (i = range_min; i <= range_max; i++) {
 		pll = &dev_priv->shared_dplls[i];
@@ -265,9 +267,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
 			continue;
 		}
 
-		if (memcmp(&crtc_state->dpll_hw_state,
+		if (memcmp(pll_state,
 			   &shared_dpll[i].hw_state,
-			   sizeof(crtc_state->dpll_hw_state)) == 0) {
+			   sizeof(*pll_state)) == 0) {
 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
 				      crtc->base.base.id, crtc->base.name,
 				      pll->info->name,
@@ -289,26 +291,51 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
 }
 
 static void
-intel_reference_shared_dpll(struct intel_shared_dpll *pll,
-			    struct intel_crtc_state *crtc_state)
+intel_reference_shared_dpll(struct intel_atomic_state *state,
+			    const struct intel_crtc *crtc,
+			    const struct intel_shared_dpll *pll,
+			    const struct intel_dpll_hw_state *pll_state)
 {
 	struct intel_shared_dpll_state *shared_dpll;
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	const enum intel_dpll_id id = pll->info->id;
 
-	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 
 	if (shared_dpll[id].crtc_mask == 0)
-		shared_dpll[id].hw_state =
-			crtc_state->dpll_hw_state;
+		shared_dpll[id].hw_state = *pll_state;
 
-	crtc_state->shared_dpll = pll;
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
 			 pipe_name(crtc->pipe));
 
 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
 }
 
+static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
+					  const struct intel_crtc *crtc,
+					  const struct intel_shared_dpll *pll)
+{
+	struct intel_shared_dpll_state *shared_dpll;
+
+	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+static void intel_put_dpll(struct intel_atomic_state *state,
+			   struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+
+	new_crtc_state->shared_dpll = NULL;
+
+	if (!old_crtc_state->shared_dpll)
+		return;
+
+	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+}
+
 /**
  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
  * @state: atomic state
@@ -421,11 +448,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 	udelay(200);
 }
 
-static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool ibx_get_dpll(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
@@ -439,18 +467,22 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
 			      crtc->base.base.id, crtc->base.name,
 			      pll->info->name);
 	} else {
-		pll = intel_find_shared_dpll(crtc_state,
+		pll = intel_find_shared_dpll(state, crtc,
+					     &crtc_state->dpll_hw_state,
 					     DPLL_ID_PCH_PLL_A,
 					     DPLL_ID_PCH_PLL_B);
 	}
 
 	if (!pll)
-		return NULL;
+		return false;
 
 	/* reference the pll */
-	intel_reference_shared_dpll(pll, crtc_state);
+	intel_reference_shared_dpll(state, crtc,
+				    pll, &crtc_state->dpll_hw_state);
 
-	return pll;
+	crtc_state->shared_dpll = pll;
+
+	return true;
 }
 
 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -767,8 +799,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 	*r2_out = best.r2;
 }
 
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *
+hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc)
 {
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_shared_dpll *pll;
 	u32 val;
 	unsigned int p, n2, r2;
@@ -781,7 +817,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
 
 	crtc_state->dpll_hw_state.wrpll = val;
 
-	pll = intel_find_shared_dpll(crtc_state,
+	pll = intel_find_shared_dpll(state, crtc,
+				     &crtc_state->dpll_hw_state,
 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
 
 	if (!pll)
@@ -821,38 +858,44 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
 	return pll;
 }
 
-static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool hsw_get_dpll(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_shared_dpll *pll;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		pll = hsw_ddi_hdmi_get_dpll(crtc_state);
+		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
 		pll = hsw_ddi_dp_get_dpll(crtc_state);
 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
-			return NULL;
+			return false;
 
 		crtc_state->dpll_hw_state.spll =
 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
 
-		pll = intel_find_shared_dpll(crtc_state,
+		pll = intel_find_shared_dpll(state, crtc,
+					     &crtc_state->dpll_hw_state,
 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
 	} else {
-		return NULL;
+		return false;
 	}
 
 	if (!pll)
-		return NULL;
+		return false;
 
-	intel_reference_shared_dpll(pll, crtc_state);
+	intel_reference_shared_dpll(state, crtc,
+				    pll, &crtc_state->dpll_hw_state);
 
-	return pll;
+	crtc_state->shared_dpll = pll;
+
+	return true;
 }
 
 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1385,10 +1428,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool skl_get_dpll(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_shared_dpll *pll;
 	bool bret;
 
@@ -1396,32 +1441,37 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
-			return NULL;
+			return false;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
-			return NULL;
+			return false;
 		}
 	} else {
-		return NULL;
+		return false;
 	}
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-		pll = intel_find_shared_dpll(crtc_state,
+		pll = intel_find_shared_dpll(state, crtc,
+					     &crtc_state->dpll_hw_state,
 					     DPLL_ID_SKL_DPLL0,
 					     DPLL_ID_SKL_DPLL0);
 	else
-		pll = intel_find_shared_dpll(crtc_state,
+		pll = intel_find_shared_dpll(state, crtc,
+					     &crtc_state->dpll_hw_state,
 					     DPLL_ID_SKL_DPLL1,
 					     DPLL_ID_SKL_DPLL3);
 	if (!pll)
-		return NULL;
+		return false;
 
-	intel_reference_shared_dpll(pll, crtc_state);
+	intel_reference_shared_dpll(state, crtc,
+				    pll, &crtc_state->dpll_hw_state);
 
-	return pll;
+	crtc_state->shared_dpll = pll;
+
+	return true;
 }
 
 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1827,22 +1877,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
 }
 
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool bxt_get_dpll(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id id;
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
-		return NULL;
+		return false;
 
 	if (intel_crtc_has_dp_encoder(crtc_state) &&
 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
-		return NULL;
+		return false;
 
 	/* 1:1 mapping between ports and PLLs */
 	id = (enum intel_dpll_id) encoder->port;
@@ -1851,9 +1902,12 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
 		      crtc->base.base.id, crtc->base.name, pll->info->name);
 
-	intel_reference_shared_dpll(pll, crtc_state);
+	intel_reference_shared_dpll(state, crtc,
+				    pll, &crtc_state->dpll_hw_state);
 
-	return pll;
+	crtc_state->shared_dpll = pll;
+
+	return true;
 }
 
 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1884,9 +1938,14 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 struct intel_dpll_mgr {
 	const struct dpll_info *dpll_info;
 
-	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
-					      struct intel_encoder *encoder);
-
+	bool (*get_dplls)(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc,
+			  struct intel_encoder *encoder);
+	void (*put_dplls)(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc);
+	void (*update_active_dpll)(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc,
+				   struct intel_encoder *encoder);
 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state);
 };
@@ -1899,7 +1958,8 @@ static const struct dpll_info pch_plls[] = {
 
 static const struct intel_dpll_mgr pch_pll_mgr = {
 	.dpll_info = pch_plls,
-	.get_dpll = ibx_get_dpll,
+	.get_dplls = ibx_get_dpll,
+	.put_dplls = intel_put_dpll,
 	.dump_hw_state = ibx_dump_hw_state,
 };
 
@@ -1915,7 +1975,8 @@ static const struct dpll_info hsw_plls[] = {
 
 static const struct intel_dpll_mgr hsw_pll_mgr = {
 	.dpll_info = hsw_plls,
-	.get_dpll = hsw_get_dpll,
+	.get_dplls = hsw_get_dpll,
+	.put_dplls = intel_put_dpll,
 	.dump_hw_state = hsw_dump_hw_state,
 };
 
@@ -1929,7 +1990,8 @@ static const struct dpll_info skl_plls[] = {
 
 static const struct intel_dpll_mgr skl_pll_mgr = {
 	.dpll_info = skl_plls,
-	.get_dpll = skl_get_dpll,
+	.get_dplls = skl_get_dpll,
+	.put_dplls = intel_put_dpll,
 	.dump_hw_state = skl_dump_hw_state,
 };
 
@@ -1942,7 +2004,8 @@ static const struct dpll_info bxt_plls[] = {
 
 static const struct intel_dpll_mgr bxt_pll_mgr = {
 	.dpll_info = bxt_plls,
-	.get_dpll = bxt_get_dpll,
+	.get_dplls = bxt_get_dpll,
+	.put_dplls = intel_put_dpll,
 	.dump_hw_state = bxt_dump_hw_state,
 };
 
@@ -2332,10 +2395,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool cnl_get_dpll(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_shared_dpll *pll;
 	bool bret;
 
@@ -2343,31 +2408,35 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
-			return NULL;
+			return false;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
-			return NULL;
+			return false;
 		}
 	} else {
 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
 			      crtc_state->output_types);
-		return NULL;
+		return false;
 	}
 
-	pll = intel_find_shared_dpll(crtc_state,
+	pll = intel_find_shared_dpll(state, crtc,
+				     &crtc_state->dpll_hw_state,
 				     DPLL_ID_SKL_DPLL0,
 				     DPLL_ID_SKL_DPLL2);
 	if (!pll) {
 		DRM_DEBUG_KMS("No PLL selected\n");
-		return NULL;
+		return false;
 	}
 
-	intel_reference_shared_dpll(pll, crtc_state);
+	intel_reference_shared_dpll(state, crtc,
+				    pll, &crtc_state->dpll_hw_state);
 
-	return pll;
+	crtc_state->shared_dpll = pll;
+
+	return true;
 }
 
 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2394,7 +2463,8 @@ static const struct dpll_info cnl_plls[] = {
 
 static const struct intel_dpll_mgr cnl_pll_mgr = {
 	.dpll_info = cnl_plls,
-	.get_dpll = cnl_get_dpll,
+	.get_dplls = cnl_get_dpll,
+	.put_dplls = intel_put_dpll,
 	.dump_hw_state = cnl_dump_hw_state,
 };
 
@@ -2506,7 +2576,8 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 }
 
 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
-				struct intel_encoder *encoder)
+				struct intel_encoder *encoder,
+				struct intel_dpll_hw_state *pll_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	u32 cfgcr0, cfgcr1;
@@ -2533,11 +2604,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 		 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
 		 DPLL_CFGCR1_CENTRAL_FREQ_8400;
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
+	memset(pll_state, 0, sizeof(*pll_state));
 
-	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+	pll_state->cfgcr0 = cfgcr0;
+	pll_state->cfgcr1 = cfgcr1;
 
 	return true;
 }
@@ -2627,10 +2697,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
  * The specification for this function uses real numbers, so the math had to be
  * adapted to integer-only calculation, that's why it looks so different.
  */
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+				  struct intel_dpll_hw_state *pll_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
 	int refclk_khz = dev_priv->cdclk.hw.ref;
 	int clock = crtc_state->port_clock;
 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2792,63 +2862,184 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+/**
+ * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
+ * @crtc_state: state for the CRTC to select the DPLL for
+ * @port_dpll_id: the active @port_dpll_id to select
+ *
+ * Select the given @port_dpll_id instance from the DPLLs reserved for the
+ * CRTC.
+ */
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+			      enum icl_port_dpll_id port_dpll_id)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	struct intel_digital_port *intel_dig_port;
-	struct intel_shared_dpll *pll;
+	struct icl_port_dpll *port_dpll =
+		&crtc_state->icl_port_dplls[port_dpll_id];
+
+	crtc_state->shared_dpll = port_dpll->pll;
+	crtc_state->dpll_hw_state = port_dpll->hw_state;
+}
+
+static void icl_update_active_dpll(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc,
+				   struct intel_encoder *encoder)
+{
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_digital_port *primary_port;
+	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+
+	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
+		enc_to_mst(&encoder->base)->primary :
+		enc_to_dig_port(&encoder->base);
+
+	if (primary_port &&
+	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
+	     primary_port->tc_mode == TC_PORT_LEGACY))
+		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+	icl_set_active_port_dpll(crtc_state, port_dpll_id);
+}
+
+static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc,
+				   struct intel_encoder *encoder)
+{
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct icl_port_dpll *port_dpll =
+		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum port port = encoder->port;
-	enum intel_dpll_id min, max;
-	bool ret;
+	bool has_dpll4 = false;
 
-	if (intel_port_is_combophy(dev_priv, port)) {
-		min = DPLL_ID_ICL_DPLL0;
-		max = DPLL_ID_ICL_DPLL1;
-		ret = icl_calc_dpll_state(crtc_state, encoder);
-	} else if (intel_port_is_tc(dev_priv, port)) {
-		if (encoder->type == INTEL_OUTPUT_DP_MST) {
-			struct intel_dp_mst_encoder *mst_encoder;
+	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
 
-			mst_encoder = enc_to_mst(&encoder->base);
-			intel_dig_port = mst_encoder->primary;
-		} else {
-			intel_dig_port = enc_to_dig_port(&encoder->base);
-		}
-
-		if (intel_dig_port->tc_type == TC_PORT_TBT) {
-			min = DPLL_ID_ICL_TBTPLL;
-			max = min;
-			ret = icl_calc_dpll_state(crtc_state, encoder);
-		} else {
-			enum tc_port tc_port;
-
-			tc_port = intel_port_to_tc(dev_priv, port);
-			min = icl_tc_port_to_pll_id(tc_port);
-			max = min;
-			ret = icl_calc_mg_pll_state(crtc_state);
-		}
-	} else {
-		MISSING_CASE(port);
-		return NULL;
+		return false;
 	}
 
-	if (!ret) {
-		DRM_DEBUG_KMS("Could not calculate PLL state.\n");
-		return NULL;
+	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
+		has_dpll4 = true;
+
+	port_dpll->pll = intel_find_shared_dpll(state, crtc,
+						&port_dpll->hw_state,
+						DPLL_ID_ICL_DPLL0,
+						has_dpll4 ? DPLL_ID_EHL_DPLL4
+							  : DPLL_ID_ICL_DPLL1);
+	if (!port_dpll->pll) {
+		DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
+			      port_name(encoder->port));
+		return false;
 	}
 
+	intel_reference_shared_dpll(state, crtc,
+				    port_dpll->pll, &port_dpll->hw_state);
 
-	pll = intel_find_shared_dpll(crtc_state, min, max);
-	if (!pll) {
-		DRM_DEBUG_KMS("No PLL selected\n");
-		return NULL;
+	icl_update_active_dpll(state, crtc, encoder);
+
+	return true;
+}
+
+static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+				 struct intel_crtc *crtc,
+				 struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct icl_port_dpll *port_dpll;
+	enum intel_dpll_id dpll_id;
+
+	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+		return false;
 	}
 
-	intel_reference_shared_dpll(pll, crtc_state);
+	port_dpll->pll = intel_find_shared_dpll(state, crtc,
+						&port_dpll->hw_state,
+						DPLL_ID_ICL_TBTPLL,
+						DPLL_ID_ICL_TBTPLL);
+	if (!port_dpll->pll) {
+		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+		return false;
+	}
+	intel_reference_shared_dpll(state, crtc,
+				    port_dpll->pll, &port_dpll->hw_state);
 
-	return pll;
+
+	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+		goto err_unreference_tbt_pll;
+	}
+
+	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+							 encoder->port));
+	port_dpll->pll = intel_find_shared_dpll(state, crtc,
+						&port_dpll->hw_state,
+						dpll_id,
+						dpll_id);
+	if (!port_dpll->pll) {
+		DRM_DEBUG_KMS("No MG PHY PLL found\n");
+		goto err_unreference_tbt_pll;
+	}
+	intel_reference_shared_dpll(state, crtc,
+				    port_dpll->pll, &port_dpll->hw_state);
+
+	icl_update_active_dpll(state, crtc, encoder);
+
+	return true;
+
+err_unreference_tbt_pll:
+	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+
+	return false;
+}
+
+static bool icl_get_dplls(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc,
+			  struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	enum port port = encoder->port;
+
+	if (intel_port_is_combophy(dev_priv, port))
+		return icl_get_combo_phy_dpll(state, crtc, encoder);
+	else if (intel_port_is_tc(dev_priv, port))
+		return icl_get_tc_phy_dplls(state, crtc, encoder);
+
+	MISSING_CASE(port);
+
+	return false;
+}
+
+static void icl_put_dplls(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	enum icl_port_dpll_id id;
+
+	new_crtc_state->shared_dpll = NULL;
+
+	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
+		const struct icl_port_dpll *old_port_dpll =
+			&old_crtc_state->icl_port_dplls[id];
+		struct icl_port_dpll *new_port_dpll =
+			&new_crtc_state->icl_port_dplls[id];
+
+		new_port_dpll->pll = NULL;
+
+		if (!old_port_dpll->pll)
+			continue;
+
+		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+	}
 }
 
 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
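
The ICL/EHL path above now reserves up to two DPLLs per Type-C port in crtc_state->icl_port_dplls (the TBT PLL in the DEFAULT slot, the MG PHY PLL in the MG_PHY slot), and icl_set_active_port_dpll() simply copies the pre-computed state of the requested slot into the CRTC state, as its kerneldoc earlier in this hunk describes. A hedged sketch of how a caller could flip the active PLL when the port mode changes, without recomputing anything; the wrapper name is illustrative only:

/* Illustrative only: both slots were already reserved by
 * icl_get_tc_phy_dplls(), so switching is a plain state copy.
 */
static void example_switch_tc_port_dpll(struct intel_crtc_state *crtc_state,
					bool tbt_mode)
{
	icl_set_active_port_dpll(crtc_state,
				 tbt_mode ? ICL_PORT_DPLL_DEFAULT :
					    ICL_PORT_DPLL_MG_PHY);
}
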
@@ -2945,8 +3136,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
 				   struct intel_shared_dpll *pll,
 				   struct intel_dpll_hw_state *hw_state)
 {
-	return icl_pll_get_hw_state(dev_priv, pll, hw_state,
-				    CNL_DPLL_ENABLE(pll->info->id));
+	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+	if (IS_ELKHARTLAKE(dev_priv) &&
+	    pll->info->id == DPLL_ID_EHL_DPLL4) {
+		enable_reg = MG_PLL_ENABLE(0);
+	}
+
+	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
 }
 
 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -3057,6 +3254,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
 {
 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
 
+	if (IS_ELKHARTLAKE(dev_priv) &&
+	    pll->info->id == DPLL_ID_EHL_DPLL4) {
+		enable_reg = MG_PLL_ENABLE(0);
+
+		/*
+		 * We need to disable DC states when this DPLL is enabled.
+		 * This can be done by taking a reference on DPLL4 power
+		 * domain.
+		 */
+		pll->wakeref = intel_display_power_get(dev_priv,
+						       POWER_DOMAIN_DPLL_DC_OFF);
+	}
+
 	icl_pll_power_enable(dev_priv, pll, enable_reg);
 
 	icl_dpll_write(dev_priv, pll);
@@ -3152,7 +3362,19 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 static void combo_pll_disable(struct drm_i915_private *dev_priv,
 			      struct intel_shared_dpll *pll)
 {
-	icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+	if (IS_ELKHARTLAKE(dev_priv) &&
+	    pll->info->id == DPLL_ID_EHL_DPLL4) {
+		enable_reg = MG_PLL_ENABLE(0);
+		icl_pll_disable(dev_priv, pll, enable_reg);
+
+		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+					pll->wakeref);
+		return;
+	}
+
+	icl_pll_disable(dev_priv, pll, enable_reg);
 }
 
 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
@@ -3223,19 +3445,23 @@ static const struct dpll_info icl_plls[] = {
 
 static const struct intel_dpll_mgr icl_pll_mgr = {
 	.dpll_info = icl_plls,
-	.get_dpll = icl_get_dpll,
+	.get_dplls = icl_get_dplls,
+	.put_dplls = icl_put_dplls,
+	.update_active_dpll = icl_update_active_dpll,
 	.dump_hw_state = icl_dump_hw_state,
 };
 
 static const struct dpll_info ehl_plls[] = {
 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
 	{ },
 };
 
 static const struct intel_dpll_mgr ehl_pll_mgr = {
 	.dpll_info = ehl_plls,
-	.get_dpll = icl_get_dpll,
+	.get_dplls = icl_get_dplls,
+	.put_dplls = icl_put_dplls,
 	.dump_hw_state = icl_dump_hw_state,
 };
 
@@ -3287,50 +3513,87 @@ void intel_shared_dpll_init(struct drm_device *dev)
 }
 
 /**
- * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc_state: atomic state for the crtc
+ * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to reserve DPLLs for
  * @encoder: encoder
  *
- * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc_state to the returned pll is registered in the
- * atomic state. That configuration is made effective by calling
- * intel_shared_dpll_swap_state(). The reference should be released by calling
- * intel_release_shared_dpll().
+ * This function reserves all required DPLLs for the given CRTC and encoder
+ * combination in the current atomic commit @state and the new @crtc atomic
+ * state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * The reserved DPLLs should be released by calling
+ * intel_release_shared_dplls().
  *
  * Returns:
- * A shared DPLL to be used by @crtc_state and @encoder.
+ * True if all required DPLLs were successfully reserved.
  */
-struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
-		      struct intel_encoder *encoder)
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+				struct intel_crtc *crtc,
+				struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
 
 	if (WARN_ON(!dpll_mgr))
-		return NULL;
+		return false;
 
-	return dpll_mgr->get_dpll(crtc_state, encoder);
+	return dpll_mgr->get_dplls(state, crtc, encoder);
 }
 
 /**
- * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
- * @dpll: dpll in use by @crtc
- * @crtc: crtc
+ * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
  * @state: atomic state
+ * @crtc: crtc from which the DPLLs are to be released
  *
- * This function releases the reference from @crtc to @dpll from the
- * atomic @state. The new configuration is made effective by calling
- * intel_shared_dpll_swap_state().
+ * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * from the current atomic commit @state and the old @crtc atomic state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
  */
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
-			       struct intel_crtc *crtc,
-			       struct drm_atomic_state *state)
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+				struct intel_crtc *crtc)
 {
-	struct intel_shared_dpll_state *shared_dpll_state;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
 
-	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
-	shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+	/*
+	 * FIXME: this function is called for every platform having a
+	 * compute_clock hook, even though the platform doesn't yet support
+	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
+	 * called on those.
+	 */
+	if (!dpll_mgr)
+		return;
+
+	dpll_mgr->put_dplls(state, crtc);
+}
+
+/**
+ * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * @state: atomic state
+ * @crtc: the CRTC for which to update the active DPLL
+ * @encoder: encoder determining the type of port DPLL
+ *
+ * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
+ * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * DPLL selected will be based on the current mode of the encoder's port.
+ */
+void intel_update_active_dpll(struct intel_atomic_state *state,
+			      struct intel_crtc *crtc,
+			      struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+	if (WARN_ON(!dpll_mgr))
+		return;
+
+	dpll_mgr->update_active_dpll(state, crtc, encoder);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index d057041..4c2c5e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 
 #include "intel_display.h"
+#include "intel_wakeref.h"
 
 /*FIXME: Move this to a more appropriate place. */
 #define abs_diff(a, b) ({			\
@@ -39,6 +40,7 @@
 struct drm_atomic_state;
 struct drm_device;
 struct drm_i915_private;
+struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
 struct intel_encoder;
@@ -118,6 +120,10 @@ enum intel_dpll_id {
 	 */
 	DPLL_ID_ICL_DPLL1 = 1,
 	/**
+	 * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4
+	 */
+	DPLL_ID_EHL_DPLL4 = 2,
+	/**
 	 * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
 	 */
 	DPLL_ID_ICL_TBTPLL = 2,
@@ -140,6 +146,13 @@ enum intel_dpll_id {
 };
 #define I915_NUM_PLLS 7
 
+enum icl_port_dpll_id {
+	ICL_PORT_DPLL_DEFAULT,
+	ICL_PORT_DPLL_MG_PHY,
+
+	ICL_PORT_DPLL_COUNT,
+};
+
 struct intel_dpll_hw_state {
 	/* i9xx, pch plls */
 	u32 dpll;
@@ -195,7 +208,7 @@ struct intel_dpll_hw_state {
  * future state which would be applied by an atomic mode set (stored in
  * a struct &intel_atomic_state).
  *
- * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
  */
 struct intel_shared_dpll_state {
 	/**
@@ -312,6 +325,7 @@ struct intel_shared_dpll {
 	 * @info: platform specific info
 	 */
 	const struct dpll_info *info;
+	intel_wakeref_t wakeref;
 };
 
 #define SKL_DPLL0 0
@@ -331,11 +345,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			bool state);
 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
-						struct intel_encoder *encoder);
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
-			       struct intel_crtc *crtc,
-			       struct drm_atomic_state *state);
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+				struct intel_crtc *crtc,
+				struct intel_encoder *encoder);
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+				struct intel_crtc *crtc);
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+			      enum icl_port_dpll_id port_dpll_id);
+void intel_update_active_dpll(struct intel_atomic_state *state,
+			      struct intel_crtc *crtc,
+			      struct intel_encoder *encoder);
 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index bc3a94d..a78139f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -523,12 +523,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 	 * authentication.
 	 */
 	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
-	if (num_downstream == 0)
+	if (num_downstream == 0) {
+		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
 		return -EINVAL;
+	}
 
 	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
-	if (!ksv_fifo)
+	if (!ksv_fifo) {
+		DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
 		return -ENOMEM;
+	}
 
 	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
 	if (ret)
@@ -1206,8 +1210,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 	if (ret < 0)
 		return ret;
 
-	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
+		DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
 		return -EINVAL;
+	}
 
 	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
 
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 21339b7..0792972 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -175,6 +175,7 @@ struct overlay_registers {
 
 struct intel_overlay {
 	struct drm_i915_private *i915;
+	struct intel_context *context;
 	struct intel_crtc *crtc;
 	struct i915_vma *vma;
 	struct i915_vma *old_vma;
@@ -239,9 +240,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 
 static struct i915_request *alloc_request(struct intel_overlay *overlay)
 {
-	struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
-
-	return i915_request_create(engine->kernel_context);
+	return i915_request_create(overlay->context);
 }
 
 /* overlay needs to be disable in OCMD reg */
@@ -1359,11 +1358,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	if (!HAS_OVERLAY(dev_priv))
 		return;
 
+	if (!HAS_ENGINE(dev_priv, RCS0))
+		return;
+
 	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return;
 
 	overlay->i915 = dev_priv;
+	overlay->context = dev_priv->engine[RCS0]->kernel_context;
+	GEM_BUG_ON(!overlay->context);
 
 	overlay->color_key = 0x0101fe;
 	overlay->color_key_enabled = true;
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 1e2c4307..9a48f7a 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
 
 	I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
 	POSTING_READ(PIPE_CRC_CTL(crtc->index));
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index ceda03e..3fe8eae 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
 	return false;
 }
 
-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
+
 /** Mapping of command numbers to names, for debug output */
-static const struct _sdvo_cmd_name {
+static const struct {
 	u8 cmd;
 	const char *name;
 } __attribute__ ((packed)) sdvo_cmd_names[] = {
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+	SDVO_CMD_NAME_ENTRY(RESET),
+	SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
+	SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
+	SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
+	SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
+	SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
+	SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
+	SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
+	SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
+	SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
+	SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
+	SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
+	SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
+	SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
+	SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
+	SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
 
 	/* Add the op code for SDVO enhancements */
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
+	SDVO_CMD_NAME_ENTRY(GET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SET_HPOS),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
+	SDVO_CMD_NAME_ENTRY(GET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SET_VPOS),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
+	SDVO_CMD_NAME_ENTRY(GET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
+	SDVO_CMD_NAME_ENTRY(GET_HUE),
+	SDVO_CMD_NAME_ENTRY(SET_HUE),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
 
 	/* HDMI op code */
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
-	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
+	SDVO_CMD_NAME_ENTRY(GET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
+	SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
+	SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
+	SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
 };
 
+#undef SDVO_CMD_NAME_ENTRY
+
+static const char *sdvo_cmd_name(u8 cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+		if (cmd == sdvo_cmd_names[i].cmd)
+			return sdvo_cmd_names[i].name;
+	}
+
+	return NULL;
+}
+
 #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
 
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len)
 {
+	const char *cmd_name;
 	int i, pos = 0;
 #define BUF_LEN 256
 	char buffer[BUF_LEN];
@@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 	for (; i < 8; i++) {
 		BUF_PRINT("   ");
 	}
-	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
-		if (cmd == sdvo_cmd_names[i].cmd) {
-			BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
-			break;
-		}
-	}
-	if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+
+	cmd_name = sdvo_cmd_name(cmd);
+	if (cmd_name)
+		BUF_PRINT("(%s)", cmd_name);
+	else
 		BUF_PRINT("(%02X)", cmd);
-	}
 	BUG_ON(pos >= BUF_LEN - 1);
 #undef BUF_PRINT
 #undef BUF_LEN
@@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 }
 
 static const char * const cmd_status_names[] = {
-	"Power on",
-	"Success",
-	"Not supported",
-	"Invalid arg",
-	"Pending",
-	"Target not specified",
-	"Scaling not supported"
+	[SDVO_CMD_STATUS_POWER_ON] = "Power on",
+	[SDVO_CMD_STATUS_SUCCESS] = "Success",
+	[SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
+	[SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
+	[SDVO_CMD_STATUS_PENDING] = "Pending",
+	[SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
+	[SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
 };
 
+static const char *sdvo_cmd_status(u8 status)
+{
+	if (status < ARRAY_SIZE(cmd_status_names))
+		return cmd_status_names[status];
+	else
+		return NULL;
+}
+
 static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len,
 				   bool unlocked)
@@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
 static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 				     void *response, int response_len)
 {
+	const char *cmd_status;
 	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
 	int i, pos = 0;
@@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 #define BUF_PRINT(args...) \
 	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
 
-	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-		BUF_PRINT("(%s)", cmd_status_names[status]);
+	cmd_status = sdvo_cmd_status(status);
+	if (cmd_status)
+		BUF_PRINT("(%s)", cmd_status);
 	else
 		BUF_PRINT("(??? %d)", status);
 
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 004b520..5e5ea86 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -441,9 +441,21 @@ icl_program_input_csc(struct intel_plane *plane,
 		 */
 		[DRM_COLOR_YCBCR_BT709] = {
 			0x7C98, 0x7800, 0x0,
-			0x9EF8, 0x7800, 0xABF8,
+			0x9EF8, 0x7800, 0xAC00,
 			0x0, 0x7800,  0x7ED8,
 		},
+		/*
+		 * BT.2020 full range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.000, 0.000, 1.474,
+		 *  1.000, -0.1645, -0.5713,
+		 *  1.000, 1.8814, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT2020] = {
+			0x7BC8, 0x7800, 0x0,
+			0x8928, 0x7800, 0xAA88,
+			0x0, 0x7800, 0x7F10,
+		},
 	};
 
 	/* Matrix for Limited Range to Full Range Conversion */
@@ -451,26 +463,38 @@ icl_program_input_csc(struct intel_plane *plane,
 		/*
 		 * BT.601 Limited range YCbCr -> full range RGB
 		 * The matrix required is :
-		 * [1.164384, 0.000, 1.596370,
-		 *  1.138393, -0.382500, -0.794598,
-		 *  1.138393, 1.971696, 0.0000]
+		 * [1.164384, 0.000, 1.596027,
+		 *  1.164384, -0.39175, -0.812813,
+		 *  1.164384, 2.017232, 0.0000]
 		 */
 		[DRM_COLOR_YCBCR_BT601] = {
 			0x7CC8, 0x7950, 0x0,
-			0x8CB8, 0x7918, 0x9C40,
-			0x0, 0x7918, 0x7FC8,
+			0x8D00, 0x7950, 0x9C88,
+			0x0, 0x7950, 0x6810,
 		},
 		/*
 		 * BT.709 Limited range YCbCr -> full range RGB
 		 * The matrix required is :
-		 * [1.164, 0.000, 1.833671,
-		 *  1.138393, -0.213249, -0.532909,
-		 *  1.138393, 2.112402, 0.0000]
+		 * [1.164384, 0.000, 1.792741,
+		 *  1.164384, -0.213249, -0.532909,
+		 *  1.164384, 2.112402, 0.0000]
 		 */
 		[DRM_COLOR_YCBCR_BT709] = {
-			0x7EA8, 0x7950, 0x0,
-			0x8888, 0x7918, 0xADA8,
-			0x0, 0x7918,  0x6870,
+			0x7E58, 0x7950, 0x0,
+			0x8888, 0x7950, 0xADA8,
+			0x0, 0x7950,  0x6870,
+		},
+		/*
+		 * BT.2020 Limited range YCbCr -> full range RGB
+		 * The matrix required is :
+		 * [1.164, 0.000, 1.678,
+		 *  1.164, -0.1873, -0.6504,
+		 *  1.164, 2.1417, 0.0000]
+		 */
+		[DRM_COLOR_YCBCR_BT2020] = {
+			0x7D70, 0x7950, 0x0,
+			0x8A68, 0x7950, 0xAC00,
+			0x0, 0x7950, 0x6890,
 		},
 	};
 	const u16 *csc;
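
The corrected coefficients in the comments above follow from the BT.709 constants (Kr = 0.2126, Kb = 0.0722) and the limited-range scale factors 255/219 for luma and 255/224 for chroma. A small stand-alone C sketch (illustrative only, not part of the patch) that reproduces the numbers:

	#include <stdio.h>

	/* Reproduce the BT.709 limited-range YCbCr -> full-range RGB coefficients. */
	int main(void)
	{
		const double Kr = 0.2126, Kb = 0.0722, Kg = 1.0 - Kr - Kb;
		const double y_scale = 255.0 / 219.0; /* Y: 16..235 -> 0..255 */
		const double c_scale = 255.0 / 224.0; /* C: 16..240 -> full range */

		printf("Y       : %f\n", y_scale);                           /*  1.164384 */
		printf("Cr -> R : %f\n", c_scale * 2 * (1 - Kr));            /*  1.792741 */
		printf("Cb -> G : %f\n", -c_scale * 2 * (1 - Kb) * Kb / Kg); /* -0.213249 */
		printf("Cr -> G : %f\n", -c_scale * 2 * (1 - Kr) * Kr / Kg); /* -0.532909 */
		printf("Cb -> B : %f\n", c_scale * 2 * (1 - Kb));            /*  2.112402 */

		return 0;
	}
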
@@ -492,8 +516,11 @@ icl_program_input_csc(struct intel_plane *plane,
 
 	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
 		      PREOFF_YUV_TO_RGB_HI);
-	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-		      PREOFF_YUV_TO_RGB_ME);
+	if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+	else
+		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+			      PREOFF_YUV_TO_RGB_ME);
 	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
 		      PREOFF_YUV_TO_RGB_LO);
 	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
@@ -683,6 +710,16 @@ skl_plane_get_hw_state(struct intel_plane *plane,
 	return ret;
 }
 
+static void i9xx_plane_linear_gamma(u16 gamma[8])
+{
+	/* The points are not evenly spaced. */
+	static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
+	int i;
+
+	for (i = 0; i < 8; i++)
+		gamma[i] = (in[i] << 8) / 32;
+}
+
 static void
 chv_update_csc(const struct intel_plane_state *plane_state)
 {
@@ -858,6 +895,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	return sprctl;
 }
 
+static void vlv_update_gamma(const struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum pipe pipe = plane->pipe;
+	enum plane_id plane_id = plane->id;
+	u16 gamma[8];
+	int i;
+
+	/* RGB data seems to always bypass the gamma */
+	if (!fb->format->is_yuv)
+		return;
+
+	i9xx_plane_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+	/* The two end points are implicit (0.0 and 1.0) */
+	for (i = 1; i < 8 - 1; i++)
+		I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
+			      gamma[i] << 16 |
+			      gamma[i] << 8 |
+			      gamma[i]);
+}
+
 static void
 vlv_update_plane(struct intel_plane *plane,
 		 const struct intel_crtc_state *crtc_state,
@@ -916,6 +978,7 @@ vlv_update_plane(struct intel_plane *plane,
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 
 	vlv_update_clrc(plane_state);
+	vlv_update_gamma(plane_state);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1013,6 +1076,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
 		return 0;
 	}
 
+	sprctl |= SPRITE_INT_GAMMA_DISABLE;
+
 	if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
 		sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
 
@@ -1033,6 +1098,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	return sprctl;
 }
 
+static void ivb_sprite_linear_gamma(u16 gamma[18])
+{
+	int i;
+
+	for (i = 0; i < 17; i++)
+		gamma[i] = (i << 10) / 16;
+
+	gamma[i] = 3 << 10;
+	i++;
+}
+
+static void ivb_update_gamma(const struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+	u16 gamma[18];
+	int i;
+
+	ivb_sprite_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+	for (i = 0; i < 16; i++)
+		I915_WRITE_FW(SPRGAMC(pipe, i),
+			      gamma[i] << 20 |
+			      gamma[i] << 10 |
+			      gamma[i]);
+
+	I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
+	I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
+	I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+	i++;
+
+	I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
+	I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
+	I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+	i++;
+}
+
 static void
 ivb_update_plane(struct intel_plane *plane,
 		 const struct intel_crtc_state *crtc_state,
@@ -1099,6 +1203,8 @@ ivb_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(SPRSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 
+	ivb_update_gamma(plane_state);
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -1224,6 +1330,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
 	return dvscntr;
 }
 
+static void g4x_update_gamma(const struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum pipe pipe = plane->pipe;
+	u16 gamma[8];
+	int i;
+
+	/* RGB data seems to always bypass the gamma */
+	if (!fb->format->is_yuv)
+		return;
+
+	i9xx_plane_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+	/* The two end points are implicit (0.0 and 1.0) */
+	for (i = 1; i < 8 - 1; i++)
+		I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
+			      gamma[i] << 16 |
+			      gamma[i] << 8 |
+			      gamma[i]);
+}
+
+static void ilk_sprite_linear_gamma(u16 gamma[17])
+{
+	int i;
+
+	for (i = 0; i < 17; i++)
+		gamma[i] = (i << 10) / 16;
+}
+
+static void ilk_update_gamma(const struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum pipe pipe = plane->pipe;
+	u16 gamma[17];
+	int i;
+
+	/* RGB data seems to always bypass the gamma */
+	if (!fb->format->is_yuv)
+		return;
+
+	ilk_sprite_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+	for (i = 0; i < 16; i++)
+		I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
+			      gamma[i] << 20 |
+			      gamma[i] << 10 |
+			      gamma[i]);
+
+	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+	i++;
+}
+
 static void
 g4x_update_plane(struct intel_plane *plane,
 		 const struct intel_crtc_state *crtc_state,
@@ -1283,6 +1449,11 @@ g4x_update_plane(struct intel_plane *plane,
 	I915_WRITE_FW(DVSSURF(pipe),
 		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
 
+	if (IS_G4X(dev_priv))
+		g4x_update_gamma(plane_state);
+	else
+		ilk_update_gamma(plane_state);
+
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
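
All of the gamma helpers added above program a linear LUT and replicate the same value into each colour-channel field of a single register word. A stand-alone sketch of that packing, assuming 10 bits per channel as in the ivb/ilk helpers (illustrative only, not the i915 register layout itself):

	#include <stdint.h>
	#include <stdio.h>

	/* Replicate one 10-bit gamma value into the three channel fields of a LUT entry. */
	static uint32_t pack_gamma_10bpc(uint16_t g)
	{
		return (uint32_t)g << 20 | (uint32_t)g << 10 | g;
	}

	int main(void)
	{
		uint16_t gamma[17];
		int i;

		/* Linear ramp: 17 evenly spaced points, with 1.0 == 1 << 10. */
		for (i = 0; i < 17; i++)
			gamma[i] = (i << 10) / 16;

		for (i = 0; i < 17; i++)
			printf("entry %2d: 0x%08x\n", i, pack_gamma_10bpc(gamma[i]));

		return 0;
	}
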
 
@@ -1852,52 +2023,6 @@ static const u32 skl_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
-static const u32 icl_plane_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_YUYV,
-	DRM_FORMAT_YVYU,
-	DRM_FORMAT_UYVY,
-	DRM_FORMAT_VYUY,
-	DRM_FORMAT_Y210,
-	DRM_FORMAT_Y212,
-	DRM_FORMAT_Y216,
-	DRM_FORMAT_XVYU2101010,
-	DRM_FORMAT_XVYU12_16161616,
-	DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_ABGR8888,
-	DRM_FORMAT_XRGB2101010,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_XRGB16161616F,
-	DRM_FORMAT_XBGR16161616F,
-	DRM_FORMAT_ARGB16161616F,
-	DRM_FORMAT_ABGR16161616F,
-	DRM_FORMAT_YUYV,
-	DRM_FORMAT_YVYU,
-	DRM_FORMAT_UYVY,
-	DRM_FORMAT_VYUY,
-	DRM_FORMAT_Y210,
-	DRM_FORMAT_Y212,
-	DRM_FORMAT_Y216,
-	DRM_FORMAT_XVYU2101010,
-	DRM_FORMAT_XVYU12_16161616,
-	DRM_FORMAT_XVYU16161616,
-};
-
 static const u32 skl_planar_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
@@ -1933,7 +2058,28 @@ static const u32 glk_planar_formats[] = {
 	DRM_FORMAT_P016,
 };
 
-static const u32 icl_planar_formats[] = {
+static const u32 icl_sdr_y_plane_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_Y210,
+	DRM_FORMAT_Y212,
+	DRM_FORMAT_Y216,
+	DRM_FORMAT_XVYU2101010,
+	DRM_FORMAT_XVYU12_16161616,
+	DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_sdr_uv_plane_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -1958,7 +2104,7 @@ static const u32 icl_planar_formats[] = {
 	DRM_FORMAT_XVYU16161616,
 };
 
-static const u32 icl_hdr_planar_formats[] = {
+static const u32 icl_hdr_plane_formats[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
@@ -2201,9 +2347,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
 static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
 				 enum pipe pipe, enum plane_id plane_id)
 {
-	if (INTEL_GEN(dev_priv) >= 11)
-		return plane_id <= PLANE_SPRITE3;
-
 	/* Display WA #0870: skl, bxt */
 	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 		return false;
@@ -2217,6 +2360,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
 	return true;
 }
 
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+					enum pipe pipe, enum plane_id plane_id,
+					int *num_formats)
+{
+	if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+		*num_formats = ARRAY_SIZE(skl_planar_formats);
+		return skl_planar_formats;
+	} else {
+		*num_formats = ARRAY_SIZE(skl_plane_formats);
+		return skl_plane_formats;
+	}
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+					enum pipe pipe, enum plane_id plane_id,
+					int *num_formats)
+{
+	if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+		*num_formats = ARRAY_SIZE(glk_planar_formats);
+		return glk_planar_formats;
+	} else {
+		*num_formats = ARRAY_SIZE(skl_plane_formats);
+		return skl_plane_formats;
+	}
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+					enum pipe pipe, enum plane_id plane_id,
+					int *num_formats)
+{
+	if (icl_is_hdr_plane(dev_priv, plane_id)) {
+		*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+		return icl_hdr_plane_formats;
+	} else if (icl_is_nv12_y_plane(plane_id)) {
+		*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+		return icl_sdr_y_plane_formats;
+	} else {
+		*num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+		return icl_sdr_uv_plane_formats;
+	}
+}
+
 static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 			      enum pipe pipe, enum plane_id plane_id)
 {
@@ -2270,30 +2455,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 	if (icl_is_nv12_y_plane(plane_id))
 		plane->update_slave = icl_update_slave;
 
-	if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
-		if (icl_is_hdr_plane(dev_priv, plane_id)) {
-			formats = icl_hdr_planar_formats;
-			num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
-		} else if (INTEL_GEN(dev_priv) >= 11) {
-			formats = icl_planar_formats;
-			num_formats = ARRAY_SIZE(icl_planar_formats);
-		} else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
-			formats = glk_planar_formats;
-			num_formats = ARRAY_SIZE(glk_planar_formats);
-		} else {
-			formats = skl_planar_formats;
-			num_formats = ARRAY_SIZE(skl_planar_formats);
-		}
-	} else if (icl_is_hdr_plane(dev_priv, plane_id)) {
-		formats = icl_hdr_plane_formats;
-		num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
-	} else if (INTEL_GEN(dev_priv) >= 11) {
-		formats = icl_plane_formats;
-		num_formats = ARRAY_SIZE(icl_plane_formats);
-	} else {
-		formats = skl_plane_formats;
-		num_formats = ARRAY_SIZE(skl_plane_formats);
-	}
+	if (INTEL_GEN(dev_priv) >= 11)
+		formats = icl_get_plane_formats(dev_priv, pipe,
+						plane_id, &num_formats);
+	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		formats = glk_get_plane_formats(dev_priv, pipe,
+						plane_id, &num_formats);
+	else
+		formats = skl_get_plane_formats(dev_priv, pipe,
+						plane_id, &num_formats);
 
 	plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
 	if (plane->has_ccs)
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
new file mode 100644
index 0000000..53103a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_display.h"
+#include "intel_dp_mst.h"
+#include "i915_drv.h"
+#include "intel_tc.h"
+
+static const char *tc_port_mode_name(enum tc_port_mode mode)
+{
+	static const char * const names[] = {
+		[TC_PORT_TBT_ALT] = "tbt-alt",
+		[TC_PORT_DP_ALT] = "dp-alt",
+		[TC_PORT_LEGACY] = "legacy",
+	};
+
+	if (WARN_ON(mode >= ARRAY_SIZE(names)))
+		mode = TC_PORT_TBT_ALT;
+
+	return names[mode];
+}
+
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	u32 lane_mask;
+
+	lane_mask = I915_READ(PORT_TX_DFLEXDPSP);
+
+	WARN_ON(lane_mask == 0xffffffff);
+
+	return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+	       DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+}
+
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	intel_wakeref_t wakeref;
+	u32 lane_mask;
+
+	if (dig_port->tc_mode != TC_PORT_DP_ALT)
+		return 4;
+
+	lane_mask = 0;
+	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+		lane_mask = intel_tc_port_get_lane_mask(dig_port);
+
+	switch (lane_mask) {
+	default:
+		MISSING_CASE(lane_mask);
+		/* fall-through */
+	case 0x1:
+	case 0x2:
+	case 0x4:
+	case 0x8:
+		return 1;
+	case 0x3:
+	case 0xc:
+		return 2;
+	case 0xf:
+		return 4;
+	}
+}
+
+static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+				      u32 live_status_mask)
+{
+	u32 valid_hpd_mask;
+
+	if (dig_port->tc_legacy_port)
+		valid_hpd_mask = BIT(TC_PORT_LEGACY);
+	else
+		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
+				 BIT(TC_PORT_TBT_ALT);
+
+	if (!(live_status_mask & ~valid_hpd_mask))
+		return;
+
+	/* If live status mismatches the VBT flag, trust the live status. */
+	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+		  dig_port->tc_port_name, live_status_mask);
+
+	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	u32 mask = 0;
+	u32 val;
+
+	val = I915_READ(PORT_TX_DFLEXDPSP);
+
+	if (val == 0xffffffff) {
+		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
+			      dig_port->tc_port_name);
+		return mask;
+	}
+
+	if (val & TC_LIVE_STATE_TBT(tc_port))
+		mask |= BIT(TC_PORT_TBT_ALT);
+	if (val & TC_LIVE_STATE_TC(tc_port))
+		mask |= BIT(TC_PORT_DP_ALT);
+
+	if (I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+		mask |= BIT(TC_PORT_LEGACY);
+
+	/* The sink can be connected only in a single mode. */
+	if (!WARN_ON(hweight32(mask) > 1))
+		tc_port_fixup_legacy_flag(dig_port, mask);
+
+	return mask;
+}
+
+static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	u32 val;
+		DRM_DEBUG_KMS("cert.rx_caps don't claim HDCP2.2\n");
+	val = I915_READ(PORT_TX_DFLEXDPPMS);
+	if (val == 0xffffffff) {
+		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
+			      dig_port->tc_port_name);
+		return false;
+	}
+
+	return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+}
+
+static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
+				     bool enable)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	u32 val;
+
+	val = I915_READ(PORT_TX_DFLEXDPCSSS);
+	if (val == 0xffffffff) {
+		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+			      dig_port->tc_port_name,
+			      enableddisabled(enable));
+
+		return false;
+	}
+
+	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+	if (!enable)
+		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+
+	I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+
+	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+		DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
+			      dig_port->tc_port_name);
+
+	return true;
+}
+
+static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+	u32 val;
+
+	val = I915_READ(PORT_TX_DFLEXDPCSSS);
+	if (val == 0xffffffff) {
+		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
+			      dig_port->tc_port_name);
+		return true;
+	}
+
+	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ */
+static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
+			       int required_lanes)
+{
+	int max_lanes;
+
+	if (!icl_tc_phy_status_complete(dig_port)) {
+		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
+			      dig_port->tc_port_name);
+		goto out_set_tbt_alt_mode;
+	}
+
+	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
+	    !WARN_ON(dig_port->tc_legacy_port))
+		goto out_set_tbt_alt_mode;
+
+	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+	if (dig_port->tc_legacy_port) {
+		WARN_ON(max_lanes != 4);
+		dig_port->tc_mode = TC_PORT_LEGACY;
+
+		return;
+	}
+
+	/*
+	 * Now we have to re-check the live state, in case the port recently
+	 * became disconnected. Not necessary for legacy mode.
+	 */
+	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
+			      dig_port->tc_port_name);
+		goto out_set_safe_mode;
+	}
+
+	if (max_lanes < required_lanes) {
+		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
+			      dig_port->tc_port_name,
+			      max_lanes, required_lanes);
+		goto out_set_safe_mode;
+	}
+
+	dig_port->tc_mode = TC_PORT_DP_ALT;
+
+	return;
+
+out_set_safe_mode:
+	icl_tc_phy_set_safe_mode(dig_port, true);
+out_set_tbt_alt_mode:
+	dig_port->tc_mode = TC_PORT_TBT_ALT;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+{
+	switch (dig_port->tc_mode) {
+	case TC_PORT_LEGACY:
+		/* Nothing to do, we never disconnect from legacy mode */
+		break;
+	case TC_PORT_DP_ALT:
+		icl_tc_phy_set_safe_mode(dig_port, true);
+		dig_port->tc_mode = TC_PORT_TBT_ALT;
+		break;
+	case TC_PORT_TBT_ALT:
+		/* Nothing to do, we stay in TBT-alt mode */
+		break;
+	default:
+		MISSING_CASE(dig_port->tc_mode);
+	}
+}
+
+static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+{
+	if (!icl_tc_phy_status_complete(dig_port)) {
+		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
+			      dig_port->tc_port_name);
+		return dig_port->tc_mode == TC_PORT_TBT_ALT;
+	}
+
+	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
+		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
+			      dig_port->tc_port_name);
+
+		return false;
+	}
+
+	return dig_port->tc_mode == TC_PORT_DP_ALT ||
+	       dig_port->tc_mode == TC_PORT_LEGACY;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+{
+	u32 live_status_mask = tc_port_live_status_mask(dig_port);
+	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
+	enum tc_port_mode mode;
+
+	if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+		return TC_PORT_TBT_ALT;
+
+	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
+	if (live_status_mask) {
+		enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+
+		if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+			mode = live_mode;
+	}
+
+	return mode;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+{
+	u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+	if (live_status_mask)
+		return fls(live_status_mask) - 1;
+
+	return icl_tc_phy_status_complete(dig_port) &&
+	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
+					  TC_PORT_TBT_ALT;
+}
+
+static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+				     int required_lanes)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+
+	intel_display_power_flush_work(dev_priv);
+	WARN_ON(intel_display_power_is_enabled(dev_priv,
+					       intel_aux_power_domain(dig_port)));
+
+	icl_tc_phy_disconnect(dig_port);
+	icl_tc_phy_connect(dig_port, required_lanes);
+
+	DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
+		      dig_port->tc_port_name,
+		      tc_port_mode_name(old_tc_mode),
+		      tc_port_mode_name(dig_port->tc_mode));
+}
+
+static void
+intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
+				 int refcount)
+{
+	WARN_ON(dig_port->tc_link_refcount);
+	dig_port->tc_link_refcount = refcount;
+}
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+{
+	struct intel_encoder *encoder = &dig_port->base;
+	int active_links = 0;
+
+	mutex_lock(&dig_port->tc_lock);
+
+	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+	if (dig_port->dp.is_mst)
+		active_links = intel_dp_mst_encoder_active_links(dig_port);
+	else if (encoder->base.crtc)
+		active_links = to_intel_crtc(encoder->base.crtc)->active;
+
+	if (active_links) {
+		if (!icl_tc_phy_is_connected(dig_port))
+			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
+				      dig_port->tc_port_name, active_links);
+		intel_tc_port_link_init_refcount(dig_port, active_links);
+
+		goto out;
+	}
+
+	if (dig_port->tc_legacy_port)
+		icl_tc_phy_connect(dig_port, 1);
+
+out:
+	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
+		      dig_port->tc_port_name,
+		      tc_port_mode_name(dig_port->tc_mode));
+
+	mutex_unlock(&dig_port->tc_lock);
+}
+
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So, in our driver, instead of adding an additional
+ * concept of "usable" and making everything check for "connected and usable", we
+ * define a port as "connected" when it is not only connected, but also when it
+ * is usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+{
+	bool is_connected;
+
+	intel_tc_port_lock(dig_port);
+	is_connected = tc_port_live_status_mask(dig_port) &
+		       BIT(dig_port->tc_mode);
+	intel_tc_port_unlock(dig_port);
+
+	return is_connected;
+}
+
+static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+				 int required_lanes)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+
+	mutex_lock(&dig_port->tc_lock);
+
+	if (!dig_port->tc_link_refcount &&
+	    intel_tc_port_needs_reset(dig_port))
+		intel_tc_port_reset_mode(dig_port, required_lanes);
+
+	WARN_ON(dig_port->tc_lock_wakeref);
+	dig_port->tc_lock_wakeref = wakeref;
+}
+
+void intel_tc_port_lock(struct intel_digital_port *dig_port)
+{
+	__intel_tc_port_lock(dig_port, 1);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+
+	mutex_unlock(&dig_port->tc_lock);
+
+	intel_display_power_put_async(dev_priv, POWER_DOMAIN_DISPLAY_CORE,
+				      wakeref);
+}
+
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+			    int required_lanes)
+{
+	__intel_tc_port_lock(dig_port, required_lanes);
+	dig_port->tc_link_refcount++;
+	intel_tc_port_unlock(dig_port);
+}
+
+void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+	mutex_lock(&dig_port->tc_lock);
+	dig_port->tc_link_refcount--;
+	mutex_unlock(&dig_port->tc_lock);
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+{
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+	enum port port = dig_port->base.port;
+	enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+	if (WARN_ON(tc_port == PORT_TC_NONE))
+		return;
+
+	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
+		 "%c/TC#%d", port_name(port), tc_port + 1);
+
+	mutex_init(&dig_port->tc_lock);
+	dig_port->tc_legacy_port = is_legacy;
+	dig_port->tc_link_refcount = 0;
+}
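
intel_tc_port_get_target_mode() and intel_tc_port_get_current_mode() above rely on the enum tc_port_mode values matching the bit positions of the live-status mask, so fls(mask) - 1 yields the highest-priority mode that reported a hotplug. A minimal user-space sketch of that selection (the fls helper and test values are illustrative):

	#include <stdio.h>

	enum tc_port_mode {
		TC_PORT_TBT_ALT,	/* lowest priority */
		TC_PORT_DP_ALT,
		TC_PORT_LEGACY,		/* highest priority */
	};

	/* Minimal fls(): 1-based index of the most significant set bit, 0 if none. */
	static int fls32(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		/* Example: both TBT-alt and DP-alt report a live connection. */
		unsigned int live_status_mask =
			(1u << TC_PORT_TBT_ALT) | (1u << TC_PORT_DP_ALT);

		if (live_status_mask)
			printf("target mode: %d (DP-alt wins)\n",
			       fls32(live_status_mask) - 1);

		return 0;
	}
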
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
new file mode 100644
index 0000000..0d8411d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_H__
+#define __INTEL_TC_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include "intel_drv.h"
+
+bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
+void intel_tc_port_lock(struct intel_digital_port *dig_port);
+void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+			    int required_lanes);
+void intel_tc_port_put_link(struct intel_digital_port *dig_port);
+
+static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+	return mutex_is_locked(&dig_port->tc_lock) ||
+	       dig_port->tc_link_refcount;
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+
+#endif /* __INTEL_TC_H__ */
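
A hedged sketch of how the locking/refcount API declared above is meant to be used by callers (the wrapper functions below are hypothetical; only the intel_tc_port_* calls come from this header):

	/*
	 * Detection goes through intel_tc_port_connected(), which takes the TC
	 * lock itself, while modeset code pins the current mode with
	 * get_link()/put_link() so a later lock cannot reset it underneath an
	 * active link.
	 */
	static bool example_detect(struct intel_digital_port *dig_port)
	{
		return intel_tc_port_connected(dig_port);
	}

	static void example_enable_link(struct intel_digital_port *dig_port,
					int required_lanes)
	{
		intel_tc_port_get_link(dig_port, required_lanes);
	}

	static void example_disable_link(struct intel_digital_port *dig_port)
	{
		intel_tc_port_put_link(dig_port);
	}
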
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index e272d82..c8002ff 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1644,7 +1644,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
 	return intel_dsi_get_panel_orientation(connector);
 }
 
-static void intel_dsi_add_properties(struct intel_connector *connector)
+static void vlv_dsi_add_properties(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
@@ -1983,7 +1983,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
-	intel_dsi_add_properties(intel_connector);
+	vlv_dsi_add_properties(intel_connector);
 
 	return;
 
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
index 07e7b8b..eec6961 100644
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ b/drivers/gpu/drm/i915/gem/Makefile
@@ -1 +1,5 @@
-include $(src)/Makefile.header-test # Extra header tests
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab07..6f537e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 	vma->ops = &proxy_vma_ops;
 
 	sleeve->vma = vma;
-	sleeve->obj = i915_gem_object_get(obj);
 	sleeve->pages = pages;
 	sleeve->page_sizes = *page_sizes;
 
@@ -85,7 +84,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
 
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
-	i915_gem_object_put(sleeve->obj);
 	kfree(sleeve);
 }
 
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
 {
 	struct clear_pages_work *w = container_of(work, typeof(*w), work);
 	struct drm_i915_private *i915 = w->ce->gem_context->i915;
-	struct drm_i915_gem_object *obj = w->sleeve->obj;
+	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
 	struct i915_vma *vma = w->sleeve->vma;
 	struct i915_request *rq;
 	int err = w->dma.error;
@@ -164,11 +162,12 @@ static void clear_pages_worker(struct work_struct *work)
 		goto out_signal;
 
 	if (obj->cache_dirty) {
-		obj->write_domain = 0;
 		if (i915_gem_object_has_struct_page(obj))
 			drm_clflush_sg(w->sleeve->pages);
 		obj->cache_dirty = false;
 	}
+	obj->read_domains = I915_GEM_GPU_DOMAINS;
+	obj->write_domain = 0;
 
 	/* XXX: we need to kill this */
 	mutex_lock(&i915->drm.struct_mutex);
@@ -193,10 +192,12 @@ static void clear_pages_worker(struct work_struct *work)
 			goto out_request;
 	}
 
-	/* XXX: more feverish nightmares await */
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	 * w->dma is already exported via (vma|obj)->resv; we need only
+	 * w->dma is already exported via (vma|obj)->resv we need only
+	 * keep track of the GPU activity within this vma/request, and
+	 * propagate the signal from the request to w->dma.
+	 */
+	err = i915_active_ref(&vma->active, rq->fence.context, rq);
 	if (err)
 		goto out_request;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0f2c22a..e367dce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -316,7 +316,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	mutex_destroy(&ctx->engines_mutex);
 
 	if (ctx->timeline)
-		i915_timeline_put(ctx->timeline);
+		intel_timeline_put(ctx->timeline);
 
 	kfree(ctx->name);
 	put_pid(ctx->pid);
@@ -528,9 +528,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 	}
 
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
-		struct i915_timeline *timeline;
+		struct intel_timeline *timeline;
 
-		timeline = i915_timeline_create(dev_priv, NULL);
+		timeline = intel_timeline_create(&dev_priv->gt, NULL);
 		if (IS_ERR(timeline)) {
 			context_close(ctx);
 			return ERR_CAST(timeline);
@@ -646,7 +646,7 @@ static void init_contexts(struct drm_i915_private *i915)
 
 static bool needs_preempt_context(struct drm_i915_private *i915)
 {
-	return HAS_EXECLISTS(i915);
+	return USES_GUC_SUBMISSION(i915);
 }
 
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
@@ -657,7 +657,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
-	intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
 	init_contexts(dev_priv);
 
 	/* lowest priority; idle task */
@@ -923,8 +922,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 	if (!cb)
 		return -ENOMEM;
 
-	i915_active_init(i915, &cb->base, cb_retire);
-	i915_active_acquire(&cb->base);
+	i915_active_init(i915, &cb->base, NULL, cb_retire);
+	err = i915_active_acquire(&cb->base);
+	if (err) {
+		kfree(cb);
+		return err;
+	}
 
 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 		struct i915_request *rq;
@@ -2015,8 +2018,8 @@ static int clone_timeline(struct i915_gem_context *dst,
 		GEM_BUG_ON(src->timeline == dst->timeline);
 
 		if (dst->timeline)
-			i915_timeline_put(dst->timeline);
-		dst->timeline = i915_timeline_get(src->timeline);
+			intel_timeline_put(dst->timeline);
+		dst->timeline = intel_timeline_get(src->timeline);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9691dd06..106e2cc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -198,12 +198,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
 }
 
 static inline struct intel_context *
-i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
-{
-	return i915_gem_context_engines(ctx)->engines[idx];
-}
-
-static inline struct intel_context *
 i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
 {
 	struct intel_context *ce = ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cc51341..0ee6148 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -26,7 +26,7 @@ struct pid;
 struct drm_i915_private;
 struct drm_i915_file_private;
 struct i915_address_space;
-struct i915_timeline;
+struct intel_timeline;
 struct intel_ring;
 
 struct i915_gem_engines {
@@ -77,7 +77,7 @@ struct i915_gem_context {
 	struct i915_gem_engines __rcu *engines;
 	struct mutex engines_mutex; /* guards writes to engines */
 
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 
 	/**
 	 * @vm: unique address space (GTT)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fae0e5..1c5dfbf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -16,6 +16,7 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
 #include "i915_gem_ioctls.h"
@@ -994,7 +995,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
 	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
 	i915_gem_object_unpin_map(cache->rq->batch->obj);
 
-	i915_gem_chipset_flush(cache->rq->i915);
+	intel_gt_chipset_flush(cache->rq->engine->gt);
 
 	i915_request_add(cache->rq);
 	cache->rq = NULL;
@@ -1954,7 +1955,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	eb->exec = NULL;
 
 	/* Unconditionally flush any chipset caches (for streaming writes). */
-	i915_gem_chipset_flush(eb->i915);
+	intel_gt_chipset_flush(eb->engine->gt);
 	return 0;
 
 err_skip:
@@ -2436,7 +2437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	intel_gt_pm_get(eb.i915);
+	intel_gt_pm_get(&eb.i915->gt);
 
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
@@ -2606,7 +2607,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_gt_pm_put(eb.i915);
+	intel_gt_pm_put(&eb.i915->gt);
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index be6cacc..d5197a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -23,7 +23,7 @@
  */
 
 #include "display/intel_frontbuffer.h"
-
+#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
@@ -146,6 +146,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 }
 
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+	reservation_object_fini(&obj->base._resv);
+	i915_gem_object_free(obj);
+
+	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+	atomic_dec(&i915->mm.free_count);
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
@@ -160,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		mutex_lock(&i915->drm.struct_mutex);
 
-		GEM_BUG_ON(i915_gem_object_is_active(obj));
 		list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
 			vma->flags &= ~I915_VMA_PIN_MASK;
@@ -169,22 +181,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
 
-		/*
-		 * This serializes freeing with the shrinker. Since the free
-		 * is delayed, first by RCU then by the workqueue, we want the
-		 * shrinker to be able to free pages of unreferenced objects,
-		 * or else we may oom whilst there are plenty of deferred
-		 * freed objects.
-		 */
-		if (i915_gem_object_has_pages(obj) &&
-		    i915_gem_object_is_shrinkable(obj)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&i915->mm.obj_lock, flags);
-			list_del_init(&obj->mm.link);
-			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-		}
-
 		mutex_unlock(&i915->drm.struct_mutex);
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
@@ -192,25 +188,21 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
-		if (obj->ops->release)
-			obj->ops->release(obj);
-
 		atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
+		bitmap_free(obj->bit_17);
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
 
-		drm_gem_object_release(&obj->base);
+		drm_gem_free_mmap_offset(&obj->base);
 
-		bitmap_free(obj->bit_17);
-		i915_gem_object_free(obj);
+		if (obj->ops->release)
+			obj->ops->release(obj);
 
-		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
-		atomic_dec(&i915->mm.free_count);
-
-		cond_resched();
+		/* But keep the pointer alive for RCU-protected lookups */
+		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 	}
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
@@ -261,18 +253,34 @@ static void __i915_gem_free_work(struct work_struct *work)
 	spin_unlock(&i915->mm.free_lock);
 }
 
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
-	struct drm_i915_gem_object *obj =
-		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
 	/*
-	 * We reuse obj->rcu for the freed list, so we had better not treat
-	 * it like a rcu_head from this point forwards. And we expect all
-	 * objects to be freed via this path.
+	 * Before we free the object, make sure any pure RCU-only
+	 * read-side critical sections are complete, e.g.
+	 * i915_gem_busy_ioctl(). For the corresponding synchronized
+	 * lookup see i915_gem_object_lookup_rcu().
 	 */
-	destroy_rcu_head(&obj->rcu);
+	atomic_inc(&i915->mm.free_count);
+
+	/*
+	 * This serializes freeing with the shrinker. Since the free
+	 * is delayed, first by RCU then by the workqueue, we want the
+	 * shrinker to be able to free pages of unreferenced objects,
+	 * or else we may oom whilst there are plenty of deferred
+	 * freed objects.
+	 */
+	if (i915_gem_object_has_pages(obj) &&
+	    i915_gem_object_is_shrinkable(obj)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+		list_del_init(&obj->mm.link);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	}
 
 	/*
 	 * Since we require blocking on struct_mutex to unbind the freed
@@ -288,20 +296,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
-	/*
-	 * Before we free the object, make sure any pure RCU-only
-	 * read-side critical sections are complete, e.g.
-	 * i915_gem_busy_ioctl(). For the corresponding synchronized
-	 * lookup see i915_gem_object_lookup_rcu().
-	 */
-	atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
-	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
 static inline enum fb_op_origin
 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 {
@@ -319,7 +313,6 @@ void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 				   unsigned int flush_domains)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 
 	assert_object_held(obj);
@@ -329,7 +322,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 
 	switch (obj->write_domain) {
 	case I915_GEM_DOMAIN_GTT:
-		i915_gem_flush_ggtt_writes(dev_priv);
+		for_each_ggtt_vma(vma, obj)
+			intel_gt_flush_ggtt_writes(vma->vm->gt);
 
 		intel_fb_obj_flush(obj,
 				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -340,6 +334,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 
 			i915_vma_unset_ggtt_write(vma);
 		}
+
 		break;
 
 	case I915_GEM_DOMAIN_WC:
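The hunks above reorder the free path so that call_rcu() is the last step: i915_gem_free_object() raises mm.free_count and queues the object, and __i915_gem_free_object_rcu() only runs once every pre-existing RCU read-side section (e.g. i915_gem_busy_ioctl() via i915_gem_object_lookup_rcu()) has completed. A minimal, self-contained sketch of that deferred-free pattern follows; struct foo and the foo_* names are illustrative, not driver code.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		struct rcu_head rcu;
		int payload;	/* looked up under rcu_read_lock() */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		/* Runs only after all pre-existing RCU readers are done. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		/* Readers may still hold the pointer: defer the kfree. */
		call_rcu(&f->rcu, foo_free_rcu);
	}
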
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd57..20754c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -159,12 +159,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 }
 
 static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
-	return READ_ONCE(obj->active_count);
-}
-
-static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
 	return READ_ONCE(obj->framebuffer_references);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 18bf4f8..34b51fa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -154,7 +154,6 @@ struct drm_i915_gem_object {
 
 	/** Count of VMA actually bound by this object */
 	atomic_t bind_count;
-	unsigned int active_count;
 	/** Count of how many global VMA are currently pinned for use by HW */
 	unsigned int pin_global;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 2deac93..102fd7a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -13,6 +13,7 @@
 #include <drm/drm_legacy.h> /* for drm_pci.h! */
 #include <drm/drm_pci.h>
 
+#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
@@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		vaddr += PAGE_SIZE;
 	}
 
-	i915_gem_chipset_flush(to_i915(obj->base.dev));
+	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st) {
@@ -132,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
-
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
-	.release = i915_gem_object_release_phys,
 };
 
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -158,7 +152,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (obj->ops != &i915_gem_shmem_ops)
 		return -EINVAL;
 
-	err = i915_gem_object_unbind(obj);
+	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 05011d4..4d77437 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -38,7 +38,7 @@ static void i915_gem_park(struct drm_i915_private *i915)
 		i915_gem_batch_pool_fini(&engine->batch_pool);
 	}
 
-	i915_timelines_park(i915);
+	intel_timelines_park(i915);
 	i915_vma_parked(i915);
 
 	i915_globals_park();
@@ -54,7 +54,8 @@ static void idle_work_handler(struct work_struct *work)
 	mutex_lock(&i915->drm.struct_mutex);
 
 	intel_wakeref_lock(&i915->gt.wakeref);
-	park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
+	park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
+		!work_pending(work));
 	intel_wakeref_unlock(&i915->gt.wakeref);
 	if (park)
 		i915_gem_park(i915);
@@ -253,14 +254,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(i915);
-
-	if (i915_gem_init_hw(i915))
+	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
 	intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19d9ecd..d2a1158 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+	fput(obj->base.filp);
+}
+
 const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
 	.writeback = shmem_writeback,
 
 	.pwrite = shmem_pwrite,
+
+	.release = shmem_release,
 };
 
 static int create_shmem(struct drm_i915_private *i915,
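With shmem_release() added above and the phys backend dropping its now-redundant release hook, final teardown of an object's backing store is funnelled through obj->ops->release(), invoked at the end of __i915_gem_free_objects() in the i915_gem_object.c hunk. A hedged sketch of how a backend would wire up that hook; the my_backend_* names are hypothetical:

	static int my_backend_get_pages(struct drm_i915_gem_object *obj)
	{
		return -ENODEV;	/* stub for the sketch */
	}

	static void my_backend_put_pages(struct drm_i915_gem_object *obj,
					 struct sg_table *pages)
	{
	}

	static void my_backend_release(struct drm_i915_gem_object *obj)
	{
		/* Last chance to drop backend resources, e.g. shmem
		 * releases its file with fput(obj->base.filp) here.
		 */
	}

	static const struct drm_i915_gem_object_ops my_backend_ops = {
		.get_pages = my_backend_get_pages,
		.put_pages = my_backend_put_pages,
		.release = my_backend_release,
	};
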
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3a926a8..3f4c6bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
+			      unsigned long shrink)
 {
-	if (i915_gem_object_unbind(obj) == 0)
+	unsigned long flags;
+
+	flags = 0;
+	if (shrink & I915_SHRINK_ACTIVE)
+		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+
+	if (i915_gem_object_unbind(obj, flags) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+
 	return !i915_gem_object_has_pages(obj);
 }
 
@@ -169,7 +177,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	 */
 
 	trace_i915_gem_shrink(i915, target, shrink);
-	i915_retire_requests(i915);
 
 	/*
 	 * Unbinding of objects will require HW access; Let us not wake the
@@ -230,8 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 				continue;
 
 			if (!(shrink & I915_SHRINK_ACTIVE) &&
-			    (i915_gem_object_is_active(obj) ||
-			     i915_gem_object_is_framebuffer(obj)))
+			    i915_gem_object_is_framebuffer(obj))
 				continue;
 
 			if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-			if (unsafe_drop_pages(obj)) {
+			if (unsafe_drop_pages(obj, shrink)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
@@ -269,8 +275,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	if (shrink & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
-	i915_retire_requests(i915);
-
 	shrinker_unlock(i915, unlock);
 
 	if (nr_scanned)
@@ -427,12 +431,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	if (!shrinker_lock(i915, 0, &unlock))
 		return NOTIFY_DONE;
 
-	/* Force everything onto the inactive lists */
-	if (i915_gem_wait_for_idle(i915,
-				   I915_WAIT_LOCKED,
-				   MAX_SCHEDULE_TIMEOUT))
-		goto out;
-
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
 					       I915_SHRINK_BOUND |
@@ -455,7 +453,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-out:
 	shrinker_unlock(i915, unlock);
 
 	*(unsigned long *)ptr += freed_pages;
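unsafe_drop_pages() now takes the shrink flags so that only a caller explicitly passing I915_SHRINK_ACTIVE is allowed to unbind still-active VMAs (via I915_GEM_OBJECT_UNBIND_ACTIVE), consistent with the removal of the forced wait-for-idle in the vmap notifier above. A hedged sketch of the two kinds of caller, modelled on the i915_gem_shrink() call shape visible in this hunk; the flag combinations and sketch_reclaim() are illustrative:

	static unsigned long sketch_reclaim(struct drm_i915_private *i915,
					    unsigned long nr_to_scan,
					    unsigned long *scanned)
	{
		unsigned long freed;

		/* Gentle pass: reclaim from idle objects only. */
		freed = i915_gem_shrink(i915, nr_to_scan, scanned,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);

		/* Last resort: also unbind objects still in use by the GPU. */
		freed += i915_gem_shrink(i915, -1UL, NULL,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND |
					 I915_SHRINK_ACTIVE);

		return freed;
	}
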
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index de1fab20..639c852 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(!stolen);
 
-	__i915_gem_object_unpin_pages(obj);
-
 	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b616..b9d2bb1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -150,7 +150,8 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 			}
 		}
 
-		ret = i915_gem_object_unbind(obj);
+		ret = i915_gem_object_unbind(obj,
+					     I915_GEM_OBJECT_UNBIND_ACTIVE);
 		if (ret == 0)
 			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
 		i915_gem_object_put(obj);
@@ -662,9 +663,25 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	__i915_gem_object_release_shmem(obj, pages, true);
 	i915_gem_gtt_finish_pages(obj, pages);
 
+	/*
+	 * We always mark objects as dirty when they are used by the GPU,
+	 * just in case. However, if we set the vma as being read-only we know
+	 * that the object will never have been written to.
+	 */
+	if (i915_gem_object_is_readonly(obj))
+		obj->mm.dirty = false;
+
 	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
-			set_page_dirty(page);
+			/*
+			 * As this may not be anonymous memory (e.g. shmem)
+			 * but exist on a real mapping, we have to lock
+			 * the page in order to dirty it -- holding
+			 * the page reference is not sufficient to
+			 * prevent the inode from being truncated.
+			 * Play safe and take the lock.
+			 */
+			set_page_dirty_lock(page);
 
 		mark_page_accessed(page);
 		put_page(page);
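The switch from set_page_dirty() to set_page_dirty_lock() is the key fix here: for shmem or other file-backed pages, dirtying without the page lock can race with truncation of the backing inode. As a reference point, set_page_dirty_lock() in the core mm is roughly the open-coded sequence below (a sketch of generic kernel behaviour, not i915 code):

	static int sketch_set_page_dirty_lock(struct page *page)
	{
		int ret;

		lock_page(page);		/* stabilise page<->mapping */
		ret = set_page_dirty(page);
		unlock_page(page);

		return ret;
	}
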
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b74729b..86eed4c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -10,6 +10,8 @@
 
 #include "gem/i915_gem_pm.h"
 
+#include "gt/intel_gt.h"
+
 #include "igt_gem_utils.h"
 #include "mock_context.h"
 
@@ -926,7 +928,7 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 	}
 
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(i915);
+	intel_gt_chipset_flush(vma->vm->gt);
 
 	i915_gem_object_unpin_map(obj);
 
@@ -1037,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 			    u64 size, u64 offset,
 			    u32 dword, u32 val)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	struct i915_vma *vma;
 	int err;
@@ -1421,6 +1422,9 @@ static int igt_ppgtt_pin_update(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned int n;
 	int first, last;
 	int err;
 
@@ -1518,11 +1522,20 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * land in the now stale 2M page.
 	 */
 
-	err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
-	if (err)
-		goto out_unpin;
+	n = 0;
+	for_each_engine(engine, dev_priv, id) {
+		if (!intel_engine_can_store_dword(engine))
+			continue;
 
-	err = cpu_check(obj, 0, 0xdeadbeaf);
+		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		if (err)
+			goto out_unpin;
+	}
+	while (n--) {
+		err = cpu_check(obj, n, 0xdeadbeaf);
+		if (err)
+			goto out_unpin;
+	}
 
 out_unpin:
 	i915_vma_unpin(vma);
@@ -1598,8 +1611,11 @@ static int igt_shrink_thp(void *arg)
 	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;
+	unsigned int n;
 	int err;
 
 	/*
@@ -1635,9 +1651,15 @@ static int igt_shrink_thp(void *arg)
 	if (err)
 		goto out_unpin;
 
-	err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
-	if (err)
-		goto out_unpin;
+	n = 0;
+	for_each_engine(engine, i915, id) {
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+		if (err)
+			goto out_unpin;
+	}
 
 	i915_vma_unpin(vma);
 
@@ -1662,7 +1684,12 @@ static int igt_shrink_thp(void *arg)
 	if (err)
 		goto out_close;
 
-	err = cpu_check(obj, 0, 0xdeadbeaf);
+	while (n--) {
+		err = cpu_check(obj, n, 0xdeadbeaf);
+		if (err)
+			goto out_unpin;
+	}
+
 
 out_unpin:
 	i915_vma_unpin(vma);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index f3a5eb8..fa79233 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -11,8 +11,8 @@
 
 static int igt_client_fill(void *arg)
 {
-	struct intel_context *ce = arg;
-	struct drm_i915_private *i915 = ce->gem_context->i915;
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
 	struct drm_i915_gem_object *obj;
 	struct rnd_state prng;
 	IGT_TIMEOUT(end);
@@ -63,17 +63,6 @@ static int igt_client_fill(void *arg)
 		if (err)
 			goto err_unpin;
 
-		/*
-		 * XXX: For now do the wait without the object resv lock to
-		 * ensure we don't deadlock.
-		 */
-		err = i915_gem_object_wait(obj,
-					   I915_WAIT_INTERRUPTIBLE |
-					   I915_WAIT_ALL,
-					   MAX_SCHEDULE_TIMEOUT);
-		if (err)
-			goto err_unpin;
-
 		i915_gem_object_lock(obj);
 		err = i915_gem_object_set_to_cpu_domain(obj, false);
 		i915_gem_object_unlock(obj);
@@ -100,11 +89,6 @@ static int igt_client_fill(void *arg)
 err_put:
 	i915_gem_object_put(obj);
 err_flush:
-	mutex_lock(&i915->drm.struct_mutex);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	if (err == -ENOMEM)
 		err = 0;
 
@@ -123,5 +107,5 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_ENGINE(i915, BCS0))
 		return 0;
 
-	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+	return i915_live_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 8f22d3f..861f32be 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -250,6 +250,9 @@ static bool needs_mi_store_dword(struct drm_i915_private *i915)
 	if (i915_terminally_wedged(i915))
 		return false;
 
+	if (!HAS_ENGINE(i915, RCS0))
+		return false;
+
 	return intel_engine_can_store_dword(i915->engine[RCS0]);
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eaa2b165..3abe15a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -31,7 +31,6 @@ static int live_nop_switch(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_gem_context **ctx;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	struct drm_file *file;
 	unsigned long n;
@@ -53,7 +52,6 @@ static int live_nop_switch(void *arg)
 		return PTR_ERR(file);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
@@ -152,7 +150,6 @@ static int live_nop_switch(void *arg)
 	}
 
 out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	mock_file_free(i915, file);
 	return err;
@@ -237,8 +234,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		    struct intel_engine_cs *engine,
 		    unsigned int dw)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
 	struct i915_request *rq;
 	struct i915_vma *vma;
 	struct i915_vma *batch;
@@ -431,6 +427,9 @@ create_test_object(struct i915_gem_context *ctx,
 	u64 size;
 	int err;
 
+	/* Keep in GEM's good graces */
+	i915_retire_requests(ctx->i915);
+
 	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
 	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
 
@@ -507,7 +506,6 @@ static int igt_ctx_exec(void *arg)
 		dw = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
-			intel_wakeref_t wakeref;
 
 			ctx = live_context(i915, file);
 			if (IS_ERR(ctx)) {
@@ -523,8 +521,7 @@ static int igt_ctx_exec(void *arg)
 				}
 			}
 
-			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-				err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
@@ -565,6 +562,8 @@ static int igt_ctx_exec(void *arg)
 		mock_file_free(i915, file);
 		if (err)
 			return err;
+
+		i915_gem_drain_freed_objects(i915);
 	}
 
 	return 0;
@@ -623,7 +622,6 @@ static int igt_shared_ctx_exec(void *arg)
 		ncontexts = 0;
 		while (!time_after(jiffies, end_time)) {
 			struct i915_gem_context *ctx;
-			intel_wakeref_t wakeref;
 
 			ctx = kernel_context(i915);
 			if (IS_ERR(ctx)) {
@@ -642,9 +640,7 @@ static int igt_shared_ctx_exec(void *arg)
 				}
 			}
 
-			err = 0;
-			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-				err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
@@ -678,6 +674,10 @@ static int igt_shared_ctx_exec(void *arg)
 
 			dw += rem;
 		}
+
+		mutex_unlock(&i915->drm.struct_mutex);
+		i915_gem_drain_freed_objects(i915);
+		mutex_lock(&i915->drm.struct_mutex);
 	}
 out_test:
 	if (igt_live_test_end(&t))
@@ -1025,35 +1025,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	       unsigned int flags)
 {
 	struct intel_engine_cs *engine = i915->engine[RCS0];
-	struct intel_sseu default_sseu = engine->sseu;
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
 	struct intel_sseu pg_sseu;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	int ret;
 
-	if (INTEL_GEN(i915) < 9)
+	if (INTEL_GEN(i915) < 9 || !engine)
 		return 0;
 
 	if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
 		return 0;
 
-	if (hweight32(default_sseu.slice_mask) < 2)
+	if (hweight32(engine->sseu.slice_mask) < 2)
 		return 0;
 
 	/*
 	 * Gen11 VME friendly power-gated configuration with half enabled
 	 * sub-slices.
 	 */
-	pg_sseu = default_sseu;
+	pg_sseu = engine->sseu;
 	pg_sseu.slice_mask = 1;
 	pg_sseu.subslice_mask =
-		~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
+		~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
 
 	pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
-		name, flags, hweight32(default_sseu.slice_mask),
+		name, flags, hweight32(engine->sseu.slice_mask),
 		hweight32(pg_sseu.slice_mask));
 
 	file = mock_file(i915);
@@ -1078,12 +1076,10 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		goto out_unlock;
 	}
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	ce = i915_gem_context_get_engine(ctx, RCS0);
 	if (IS_ERR(ce)) {
 		ret = PTR_ERR(ce);
-		goto out_rpm;
+		goto out_put;
 	}
 
 	ret = intel_context_pin(ce);
@@ -1091,7 +1087,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		goto out_context;
 
 	/* First set the default mask. */
-	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+	ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
 	if (ret)
 		goto out_fail;
 
@@ -1101,7 +1097,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		goto out_fail;
 
 	/* Back to defaults. */
-	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+	ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
 	if (ret)
 		goto out_fail;
 
@@ -1117,8 +1113,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	intel_context_unpin(ce);
 out_context:
 	intel_context_put(ce);
-out_rpm:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+out_put:
 	i915_gem_object_put(obj);
 
 out_unlock:
@@ -1207,8 +1202,6 @@ static int igt_ctx_readonly(void *arg)
 		unsigned int id;
 
 		for_each_engine(engine, i915, id) {
-			intel_wakeref_t wakeref;
-
 			if (!intel_engine_can_store_dword(engine))
 				continue;
 
@@ -1223,9 +1216,7 @@ static int igt_ctx_readonly(void *arg)
 					i915_gem_object_set_readonly(obj);
 			}
 
-			err = 0;
-			with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-				err = gpu_fill(obj, ctx, engine, dw);
+			err = gpu_fill(obj, ctx, engine, dw);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
@@ -1488,7 +1479,6 @@ static int igt_vm_isolation(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_a, *ctx_b;
 	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	struct drm_file *file;
 	I915_RND_STATE(prng);
@@ -1535,8 +1525,6 @@ static int igt_vm_isolation(void *arg)
 	GEM_BUG_ON(ctx_b->vm->total != vm_total);
 	vm_total -= I915_GTT_PAGE_SIZE;
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	count = 0;
 	for_each_engine(engine, i915, id) {
 		IGT_TIMEOUT(end_time);
@@ -1560,7 +1548,7 @@ static int igt_vm_isolation(void *arg)
 				err = read_from_scratch(ctx_b, engine,
 							offset, &value);
 			if (err)
-				goto out_rpm;
+				goto out_unlock;
 
 			if (value) {
 				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1569,7 +1557,7 @@ static int igt_vm_isolation(void *arg)
 				       lower_32_bits(offset),
 				       this);
 				err = -EINVAL;
-				goto out_rpm;
+				goto out_unlock;
 			}
 
 			this++;
@@ -1579,8 +1567,6 @@ static int igt_vm_isolation(void *arg)
 	pr_info("Checked %lu scratch offsets across %d engines\n",
 		count, RUNTIME_INFO(i915)->num_engines);
 
-out_rpm:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out_unlock:
 	if (igt_live_test_end(&t))
 		err = -EIO;
@@ -1750,5 +1736,5 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 	if (i915_terminally_wedged(dev_priv))
 		return 0;
 
-	return i915_subtests(tests, dev_priv);
+	return i915_live_subtests(tests, dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5c81f4b..b95fdc2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,6 +6,7 @@
 
 #include <linux/prime_numbers.h>
 
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "huge_gem_object.h"
 #include "i915_selftest.h"
@@ -143,7 +144,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 		if (offset >= obj->base.size)
 			continue;
 
-		i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
+		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
 
 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 		cpu = kmap(p) + offset_in_page(offset);
@@ -327,7 +328,8 @@ next_tiling: ;
 static int make_obj_busy(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_request *rq;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct i915_vma *vma;
 	int err;
 
@@ -339,18 +341,22 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
-	if (IS_ERR(rq)) {
-		i915_vma_unpin(vma);
-		return PTR_ERR(rq);
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		rq = i915_request_create(engine->kernel_context);
+		if (IS_ERR(rq)) {
+			i915_vma_unpin(vma);
+			return PTR_ERR(rq);
+		}
+
+		i915_vma_lock(vma);
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+		i915_vma_unlock(vma);
+
+		i915_request_add(rq);
 	}
 
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-
-	i915_request_add(rq);
-
 	i915_vma_unpin(vma);
 	i915_gem_object_put(obj); /* leave it only alive via its active ref */
 
@@ -378,7 +384,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_shrinker_unregister(i915);
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 
 	cancel_delayed_work_sync(&i915->gem.retire_work);
 	flush_work(&i915->gem.idle_work);
@@ -386,7 +392,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	igt_flush_test(i915, I915_WAIT_LOCKED);
@@ -413,7 +419,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
 		resv.start = hole_start;
 		resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
+		mutex_lock(&i915->drm.struct_mutex);
 		err = drm_mm_reserve_node(mm, &resv);
+		mutex_unlock(&i915->drm.struct_mutex);
 		if (err) {
 			pr_err("Failed to trim VMA manager, err=%d\n", err);
 			goto out_park;
@@ -474,19 +482,12 @@ static int igt_mmap_offset_exhaustion(void *arg)
 			pr_err("[loop %d] Failed to busy the object\n", loop);
 			goto err_obj;
 		}
-
-		/* NB we rely on the _active_ reference to access obj now */
-		GEM_BUG_ON(!i915_gem_object_is_active(obj));
-		err = create_mmap_offset(obj);
-		if (err) {
-			pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
-			       loop, err);
-			goto out;
-		}
 	}
 
 out:
+	mutex_lock(&i915->drm.struct_mutex);
 	drm_mm_remove_node(&resv);
+	mutex_unlock(&i915->drm.struct_mutex);
 out_park:
 	restore_retire_worker(i915);
 	return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e23d8c9..11d3723 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -11,8 +11,8 @@
 
 static int igt_fill_blt(void *arg)
 {
-	struct intel_context *ce = arg;
-	struct drm_i915_private *i915 = ce->gem_context->i915;
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
 	struct drm_i915_gem_object *obj;
 	struct rnd_state prng;
 	IGT_TIMEOUT(end);
@@ -83,11 +83,6 @@ static int igt_fill_blt(void *arg)
 err_put:
 	i915_gem_object_put(obj);
 err_flush:
-	mutex_lock(&i915->drm.struct_mutex);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	if (err == -ENOMEM)
 		err = 0;
 
@@ -106,5 +101,5 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_ENGINE(i915, BCS0))
 		return 0;
 
-	return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+	return i915_live_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
index 1c75b5c..eec6961 100644
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ b/drivers/gpu/drm/i915/gt/Makefile
@@ -1,2 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
 # Extra header tests
 include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
similarity index 100%
rename from drivers/gpu/drm/i915/intel_renderstate_gen6.c
rename to drivers/gpu/drm/i915/gt/gen6_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
similarity index 100%
rename from drivers/gpu/drm/i915/intel_renderstate_gen7.c
rename to drivers/gpu/drm/i915/gt/gen7_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
similarity index 100%
rename from drivers/gpu/drm/i915/intel_renderstate_gen8.c
rename to drivers/gpu/drm/i915/gt/gen8_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
similarity index 100%
rename from drivers/gpu/drm/i915/intel_renderstate_gen9.c
rename to drivers/gpu/drm/i915/gt/gen9_renderstate.c
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2c454f2..1110fc8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -59,6 +59,10 @@ int __intel_context_do_pin(struct intel_context *ce)
 		if (err)
 			goto err;
 
+		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
+			  ce->engine->name, ce->ring->timeline->fence_context,
+			  ce->ring->head, ce->ring->tail);
+
 		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
 
 		smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -85,6 +89,9 @@ void intel_context_unpin(struct intel_context *ce)
 	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
 
 	if (likely(atomic_dec_and_test(&ce->pin_count))) {
+		GEM_TRACE("%s context:%llx retire\n",
+			  ce->engine->name, ce->ring->timeline->fence_context);
+
 		ce->ops->unpin(ce);
 
 		i915_gem_context_put(ce->gem_context);
@@ -95,11 +102,15 @@ void intel_context_unpin(struct intel_context *ce)
 	intel_context_put(ce);
 }
 
-static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
+static int __context_pin_state(struct i915_vma *vma)
 {
+	u64 flags;
 	int err;
 
-	err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
+	flags |= PIN_HIGH | PIN_GLOBAL;
+
+	err = i915_vma_pin(vma, 0, 0, flags);
 	if (err)
 		return err;
 
@@ -119,16 +130,57 @@ static void __context_unpin_state(struct i915_vma *vma)
 	__i915_vma_unpin(vma);
 }
 
-static void intel_context_retire(struct i915_active *active)
+static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 
+	GEM_TRACE("%s context:%llx retire\n",
+		  ce->engine->name, ce->ring->timeline->fence_context);
+
 	if (ce->state)
 		__context_unpin_state(ce->state);
 
+	intel_ring_unpin(ce->ring);
 	intel_context_put(ce);
 }
 
+static int __intel_context_active(struct i915_active *active)
+{
+	struct intel_context *ce = container_of(active, typeof(*ce), active);
+	int err;
+
+	intel_context_get(ce);
+
+	err = intel_ring_pin(ce->ring);
+	if (err)
+		goto err_put;
+
+	if (!ce->state)
+		return 0;
+
+	err = __context_pin_state(ce->state);
+	if (err)
+		goto err_ring;
+
+	/* Preallocate tracking nodes */
+	if (!i915_gem_context_is_kernel(ce->gem_context)) {
+		err = i915_active_acquire_preallocate_barrier(&ce->active,
+							      ce->engine);
+		if (err)
+			goto err_state;
+	}
+
+	return 0;
+
+err_state:
+	__context_unpin_state(ce->state);
+err_ring:
+	intel_ring_unpin(ce->ring);
+err_put:
+	intel_context_put(ce);
+	return err;
+}
+
 void
 intel_context_init(struct intel_context *ce,
 		   struct i915_gem_context *ctx,
@@ -148,46 +200,8 @@ intel_context_init(struct intel_context *ce,
 
 	mutex_init(&ce->pin_mutex);
 
-	i915_active_init(ctx->i915, &ce->active, intel_context_retire);
-}
-
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
-{
-	int err;
-
-	if (!i915_active_acquire(&ce->active))
-		return 0;
-
-	intel_context_get(ce);
-
-	if (!ce->state)
-		return 0;
-
-	err = __context_pin_state(ce->state, flags);
-	if (err) {
-		i915_active_cancel(&ce->active);
-		intel_context_put(ce);
-		return err;
-	}
-
-	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err) {
-			i915_active_release(&ce->active);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
+	i915_active_init(ctx->i915, &ce->active,
+			 __intel_context_active, __intel_context_retire);
 }
 
 static void i915_global_context_shrink(void)
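intel_context_active_acquire()/release() shrink to thin wrappers (see the intel_context.h hunk below) because pinning the ring and context state now lives in the i915_active callbacks registered here. A hedged sketch of the resulting lifecycle, using only functions introduced or kept by this diff; sketch_use_context() itself is illustrative:

	static int sketch_use_context(struct intel_context *ce)
	{
		int err;

		/* First user: i915_active_acquire() invokes
		 * __intel_context_active(), which gets ce, pins ce->ring and
		 * ce->state, and preallocates barrier nodes for non-kernel
		 * contexts.
		 */
		err = intel_context_active_acquire(ce);
		if (err)
			return err;

		/* ... emit requests tracked by ce->active ... */

		/* Flush the preallocated barriers and drop our reference;
		 * __intel_context_retire() runs once the last tracked request
		 * retires, unpinning the state and ring again.
		 */
		intel_context_active_release(ce);

		return 0;
	}
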
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a47275b..40cd832 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -9,6 +9,7 @@
 
 #include <linux/lockdep.h>
 
+#include "i915_active.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
 
@@ -102,8 +103,17 @@ static inline void intel_context_exit(struct intel_context *ce)
 		ce->ops->exit(ce);
 }
 
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
-void intel_context_active_release(struct intel_context *ce);
+static inline int intel_context_active_acquire(struct intel_context *ce)
+{
+	return i915_active_acquire(&ce->active);
+}
+
+static inline void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
+}
 
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 08049ee..4c0e211 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 
 #include "i915_active_types.h"
+#include "i915_utils.h"
 #include "intel_engine_types.h"
 #include "intel_sseu.h"
 
@@ -38,6 +39,10 @@ struct intel_context {
 	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
+#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
+#define intel_context_inflight_count(ce)  ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight)
+#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight)
 
 	struct list_head signal_link;
 	struct list_head signals;
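The new intel_context_inflight*() macros pack a small submission count into the low bits of ce->inflight (an engine pointer), which is why i915_utils.h is now included for the ptr_*_bits() helpers. The ELSP submission code that consumes them is not part of this section, so the following is only an illustrative sketch of how such packed counting behaves:

	static void sketch_inflight_tracking(struct intel_context *ce,
					     struct intel_engine_cs *engine)
	{
		/* First submission records the engine; later ones only
		 * bump the count packed into the pointer's low bits.
		 */
		if (!intel_context_inflight_count(ce))
			ce->inflight = engine;
		intel_context_inflight_inc(ce);

		/* ... HW executes the context ... */

		/* Completion: drop the count, clear once it reaches zero. */
		intel_context_inflight_dec(ce);
		if (!intel_context_inflight_count(ce))
			ce->inflight = NULL;

		/* intel_context_inflight(ce) always yields the bare engine
		 * pointer with the count bits masked off.
		 */
	}
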
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 2f1c687..0331e9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -14,7 +14,7 @@
 #include "i915_reg.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
 #include "intel_engine_types.h"
 #include "intel_gpu_commands.h"
 #include "intel_workarounds.h"
@@ -51,7 +51,7 @@ struct drm_printer;
 #define ENGINE_READ16(...)	__ENGINE_READ_OP(read16, __VA_ARGS__)
 #define ENGINE_READ(...)	__ENGINE_READ_OP(read, __VA_ARGS__)
 #define ENGINE_READ_FW(...)	__ENGINE_READ_OP(read_fw, __VA_ARGS__)
-#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
 #define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
 
 #define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
@@ -125,71 +125,26 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 
 void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
 
-static inline void
-execlists_set_active(struct intel_engine_execlists *execlists,
-		     unsigned int bit)
-{
-	__set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline bool
-execlists_set_active_once(struct intel_engine_execlists *execlists,
-			  unsigned int bit)
-{
-	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_active(struct intel_engine_execlists *execlists,
-		       unsigned int bit)
-{
-	__clear_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_all_active(struct intel_engine_execlists *execlists)
-{
-	execlists->active = 0;
-}
-
-static inline bool
-execlists_is_active(const struct intel_engine_execlists *execlists,
-		    unsigned int bit)
-{
-	return test_bit(bit, (unsigned long *)&execlists->active);
-}
-
-void execlists_user_begin(struct intel_engine_execlists *execlists,
-			  const struct execlist_port *port);
-void execlists_user_end(struct intel_engine_execlists *execlists);
-
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-
 static inline unsigned int
 execlists_num_ports(const struct intel_engine_execlists * const execlists)
 {
 	return execlists->port_mask + 1;
 }
 
-static inline struct execlist_port *
-execlists_port_complete(struct intel_engine_execlists * const execlists,
-			struct execlist_port * const port)
+static inline struct i915_request *
+execlists_active(const struct intel_engine_execlists *execlists)
 {
-	const unsigned int m = execlists->port_mask;
-
-	GEM_BUG_ON(port_index(port, execlists) != 0);
-	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-
-	memmove(port, port + 1, m * sizeof(struct execlist_port));
-	memset(port + m, 0, sizeof(struct execlist_port));
-
-	return port;
+	GEM_BUG_ON(execlists->active - execlists->inflight >
+		   execlists_num_ports(execlists));
+	return READ_ONCE(*execlists->active);
 }
 
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
 static inline u32
 intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 {
@@ -245,7 +200,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_timeline *timeline,
+			 struct intel_timeline *timeline,
 			 int size);
 int intel_ring_pin(struct intel_ring *ring);
 void intel_ring_reset(struct intel_ring *ring, u32 tail);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7fd33e8..bdf279f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,6 +28,8 @@
 
 #include "i915_drv.h"
 
+#include "gt/intel_gt.h"
+
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
 #include "intel_context.h"
@@ -314,6 +316,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->id = id;
 	engine->mask = BIT(id);
 	engine->i915 = dev_priv;
+	engine->gt = &dev_priv->gt;
 	engine->uncore = &dev_priv->uncore;
 	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
@@ -445,15 +448,9 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
 	if (WARN_ON(mask != engine_mask))
 		device_info->engine_mask = mask;
 
-	/* We always presume we have at least RCS available for later probing */
-	if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
-		err = -ENODEV;
-		goto cleanup;
-	}
-
 	RUNTIME_INFO(i915)->num_engines = hweight32(mask);
 
-	i915_check_and_clear_faults(i915);
+	intel_gt_check_and_clear_faults(&i915->gt);
 
 	intel_setup_engine_capabilities(i915);
 
@@ -508,6 +505,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
 	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
 	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
+	memset(execlists->pending, 0, sizeof(execlists->pending));
+	execlists->active =
+		memset(execlists->inflight, 0, sizeof(execlists->inflight));
+
 	execlists->queue_priority_hint = INT_MIN;
 	execlists->queue = RB_ROOT_CACHED;
 }
@@ -577,7 +578,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
@@ -629,6 +630,10 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	engine->sseu =
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
 
+	intel_engine_init_workarounds(engine);
+	intel_engine_init_whitelist(engine);
+	intel_engine_init_ctx_wa(engine);
+
 	return 0;
 }
 
@@ -681,9 +686,10 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
 		u8 engine;
 		u8 sched;
 	} map[] = {
-#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
-		MAP(PREEMPTION, PREEMPTION),
-		MAP(SEMAPHORES, SEMAPHORES),
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+		MAP(HAS_PREEMPTION, PREEMPTION),
+		MAP(HAS_SEMAPHORES, SEMAPHORES),
+		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
 #undef MAP
 	};
 	struct intel_engine_cs *engine;
@@ -717,7 +723,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
 
 struct measure_breadcrumb {
 	struct i915_request rq;
-	struct i915_timeline timeline;
+	struct intel_timeline timeline;
 	struct intel_ring ring;
 	u32 cs[1024];
 };
@@ -727,15 +733,15 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 	struct measure_breadcrumb *frame;
 	int dw = -ENOMEM;
 
-	GEM_BUG_ON(!engine->i915->gt.scratch);
+	GEM_BUG_ON(!engine->gt->scratch);
 
 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 	if (!frame)
 		return -ENOMEM;
 
-	if (i915_timeline_init(engine->i915,
-			       &frame->timeline,
-			       engine->status_page.vma))
+	if (intel_timeline_init(&frame->timeline,
+				engine->gt,
+				engine->status_page.vma))
 		goto out_frame;
 
 	INIT_LIST_HEAD(&frame->ring.request_list);
@@ -750,17 +756,17 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 	frame->rq.ring = &frame->ring;
 	frame->rq.timeline = &frame->timeline;
 
-	dw = i915_timeline_pin(&frame->timeline);
+	dw = intel_timeline_pin(&frame->timeline);
 	if (dw < 0)
 		goto out_timeline;
 
 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
 
-	i915_timeline_unpin(&frame->timeline);
+	intel_timeline_unpin(&frame->timeline);
 
 out_timeline:
-	i915_timeline_fini(&frame->timeline);
+	intel_timeline_fini(&frame->timeline);
 out_frame:
 	kfree(frame);
 	return dw;
@@ -1148,14 +1154,14 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 	if (i915_reset_failed(engine->i915))
 		return true;
 
-	if (!intel_wakeref_active(&engine->wakeref))
+	if (!intel_engine_pm_is_awake(engine))
 		return true;
 
 	/* Waiting to drain ELSP? */
-	if (READ_ONCE(engine->execlists.active)) {
+	if (execlists_active(&engine->execlists)) {
 		struct tasklet_struct *t = &engine->execlists.tasklet;
 
-		synchronize_hardirq(engine->i915->drm.irq);
+		synchronize_hardirq(engine->i915->drm.pdev->irq);
 
 		local_bh_disable();
 		if (tasklet_trylock(t)) {
@@ -1169,7 +1175,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		/* Otherwise flush the tasklet if it was on another cpu */
 		tasklet_unlock_wait(t);
 
-		if (READ_ONCE(engine->execlists.active))
+		if (execlists_active(&engine->execlists))
 			return false;
 	}
 
@@ -1367,6 +1373,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	}
 
 	if (HAS_EXECLISTS(dev_priv)) {
+		struct i915_request * const *port, *rq;
 		const u32 *hws =
 			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
 		const u8 num_entries = execlists->csb_size;
@@ -1399,27 +1406,33 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		}
 
 		spin_lock_irqsave(&engine->active.lock, flags);
-		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-			struct i915_request *rq;
-			unsigned int count;
+		for (port = execlists->active; (rq = *port); port++) {
+			char hdr[80];
+			int len;
+
+			len = snprintf(hdr, sizeof(hdr),
+				       "\t\tActive[%d: ",
+				       (int)(port - execlists->active));
+			if (!i915_request_signaled(rq))
+				len += snprintf(hdr + len, sizeof(hdr) - len,
+						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
+						i915_ggtt_offset(rq->ring->vma),
+						rq->timeline->hwsp_offset,
+						hwsp_seqno(rq));
+			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+			print_request(m, rq, hdr);
+		}
+		for (port = execlists->pending; (rq = *port); port++) {
 			char hdr[80];
 
-			rq = port_unpack(&execlists->port[idx], &count);
-			if (!rq) {
-				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
-			} else if (!i915_request_signaled(rq)) {
-				snprintf(hdr, sizeof(hdr),
-					 "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
-					 idx, count,
-					 i915_ggtt_offset(rq->ring->vma),
-					 rq->timeline->hwsp_offset,
-					 hwsp_seqno(rq));
-				print_request(m, rq, hdr);
-			} else {
-				print_request(m, rq, "\t\tELSP[%d] rq: ");
-			}
+			snprintf(hdr, sizeof(hdr),
+				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
+				 (int)(port - execlists->pending),
+				 i915_ggtt_offset(rq->ring->vma),
+				 rq->timeline->hwsp_offset,
+				 hwsp_seqno(rq));
+			print_request(m, rq, hdr);
 		}
-		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
 		spin_unlock_irqrestore(&engine->active.lock, flags);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1583,15 +1596,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	}
 
 	if (engine->stats.enabled++ == 0) {
-		const struct execlist_port *port = execlists->port;
-		unsigned int num_ports = execlists_num_ports(execlists);
+		struct i915_request * const *port;
+		struct i915_request *rq;
 
 		engine->stats.enabled_at = ktime_get();
 
 		/* XXX submission method oblivious? */
-		while (num_ports-- && port_isset(port)) {
+		for (port = execlists->active; (rq = *port); port++)
 			engine->stats.active++;
-			port++;
+
+		for (port = execlists->pending; (rq = *port); port++) {
+			/* Exclude any contexts already counted in active */
+			if (intel_context_inflight_count(rq->hw_context) == 1)
+				engine->stats.active++;
 		}
 
 		if (engine->stats.active)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3..84e432a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -18,7 +18,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 	GEM_TRACE("%s\n", engine->name);
 
-	intel_gt_pm_get(engine->i915);
+	intel_gt_pm_get(engine->gt);
 
 	/* Pin the default state for fast resets from atomic context. */
 	map = NULL;
@@ -129,7 +129,7 @@ static int __engine_park(struct intel_wakeref *wf)
 
 	engine->execlists.no_priolist = false;
 
-	intel_gt_pm_put(engine->i915);
+	intel_gt_pm_put(engine->gt);
 	return 0;
 }
 
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
 	intel_wakeref_init(&engine->wakeref);
 }
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	intel_gt_pm_get(i915);
-	for_each_engine(engine, i915, id) {
-		intel_engine_pm_get(engine);
-		engine->serial++; /* kernel context lost */
-		err = engine->resume(engine);
-		intel_engine_pm_put(engine);
-		if (err) {
-			dev_err(i915->drm.dev,
-				"Failed to restart %s (%d)\n",
-				engine->name, err);
-			break;
-		}
-	}
-	intel_gt_pm_put(i915);
-
-	return err;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd9..015ac72 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,16 +7,28 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
 struct drm_i915_private;
-struct intel_engine_cs;
 
 void intel_engine_pm_get(struct intel_engine_cs *engine);
 void intel_engine_pm_put(struct intel_engine_cs *engine);
 
+static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+	return intel_wakeref_is_active(&engine->wakeref);
+}
+
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+	return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
 void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
-int intel_engines_resume(struct drm_i915_private *i915);
-
 #endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 868b220..7e05611 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -12,6 +12,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/llist.h>
+#include <linux/timer.h>
 #include <linux/types.h>
 
 #include "i915_gem.h"
@@ -19,7 +20,7 @@
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "i915_timeline_types.h"
+#include "gt/intel_timeline_types.h"
 #include "intel_sseu.h"
 #include "intel_wakeref.h"
 #include "intel_workarounds_types.h"
@@ -35,6 +36,7 @@ struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
+struct intel_gt;
 struct intel_uncore;
 
 typedef u8 intel_engine_mask_t;
@@ -66,10 +68,22 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 	struct list_head request_list;
 	struct list_head active_link;
 
+	/*
+	 * As we have two types of rings, one global to the engine used
+	 * by ringbuffer submission and those that are exclusive to a
+	 * context used by execlists, we have to play safe and allow
+	 * atomic updates to the pin_count. However, the actual pinning
+	 * of the context is either done during initialisation for
+	 * ringbuffer submission or serialised as part of the context
+	 * pinning for execlists, and so we do not need a mutex ourselves
+	 * to serialise intel_ring_pin/intel_ring_unpin.
+	 */
+	atomic_t pin_count;
+
 	u32 head;
 	u32 tail;
 	u32 emit;
@@ -138,6 +152,11 @@ struct intel_engine_execlists {
 	struct tasklet_struct tasklet;
 
 	/**
+	 * @timer: kick the current context if its timeslice expires
+	 */
+	struct timer_list timer;
+
+	/**
 	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
 	 */
 	struct i915_priolist default_priolist;
@@ -160,51 +179,28 @@ struct intel_engine_execlists {
 	 */
 	u32 __iomem *ctrl_reg;
 
-	/**
-	 * @port: execlist port states
-	 *
-	 * For each hardware ELSP (ExecList Submission Port) we keep
-	 * track of the last request and the number of times we submitted
-	 * that port to hw. We then count the number of times the hw reports
-	 * a context completion or preemption. As only one context can
-	 * be active on hw, we limit resubmission of context to port[0]. This
-	 * is called Lite Restore, of the context.
-	 */
-	struct execlist_port {
-		/**
-		 * @request_count: combined request and submission count
-		 */
-		struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
-		/**
-		 * @context_id: context ID for port
-		 */
-		GEM_DEBUG_DECL(u32 context_id);
-
 #define EXECLIST_MAX_PORTS 2
-	} port[EXECLIST_MAX_PORTS];
-
 	/**
-	 * @active: is the HW active? We consider the HW as active after
-	 * submitting any context for execution and until we have seen the
-	 * last context completion event. After that, we do not expect any
-	 * more events until we submit, and so can park the HW.
-	 *
-	 * As we have a small number of different sources from which we feed
-	 * the HW, we track the state of each inside a single bitfield.
+	 * @active: the currently known context executing on HW
 	 */
-	unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
+	struct i915_request * const *active;
+	/**
+	 * @inflight: the set of contexts submitted and acknowledged by HW
+	 *
+	 * The set of inflight contexts is managed by reading CS events
+	 * from the HW. On a context-switch event (not preemption), we
+	 * know the HW has transitioned from port0 to port1, and we
+	 * advance our inflight/active tracking accordingly.
+	 */
+	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
+	/**
+	 * @pending: the next set of contexts submitted to ELSP
+	 *
+	 * We store the array of contexts that we submit to HW (via ELSP) and
+	 * promote them to the inflight array once HW has signaled the
+	 * preemption or idle-to-active event.
+	 */
+	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
 
 	/**
 	 * @port_mask: number of execlist ports - 1
@@ -246,11 +242,6 @@ struct intel_engine_execlists {
 	u32 *csb_status;
 
 	/**
-	 * @preempt_complete_status: expected CSB upon completing preemption
-	 */
-	u32 preempt_complete_status;
-
-	/**
 	 * @csb_size: context status buffer FIFO size
 	 */
 	u8 csb_size;
@@ -267,6 +258,7 @@ struct intel_engine_execlists {
 
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
+	struct intel_gt *gt;
 	struct intel_uncore *uncore;
 	char name[INTEL_ENGINE_CS_MAX_NAME];
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
new file mode 100644
index 0000000..8cca6b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_uncore.h"
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+	gt->i915 = i915;
+	gt->uncore = &i915->uncore;
+
+	INIT_LIST_HEAD(&gt->active_rings);
+	INIT_LIST_HEAD(&gt->closed_vma);
+
+	spin_lock_init(&gt->closed_lock);
+
+	intel_gt_pm_init_early(gt);
+}
+
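+/* Hook the GT up to the device's global GTT. */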
+void intel_gt_init_hw(struct drm_i915_private *i915)
+{
+	i915->gt.ggtt = &i915->ggtt;
+}
+
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+	intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+	intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
+{
+	intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+	GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+void
+intel_gt_clear_error_registers(struct intel_gt *gt,
+			       intel_engine_mask_t engine_mask)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 eir;
+
+	if (!IS_GEN(i915, 2))
+		clear_register(uncore, PGTBL_ER);
+
+	if (INTEL_GEN(i915) < 4)
+		clear_register(uncore, IPEIR(RENDER_RING_BASE));
+	else
+		clear_register(uncore, IPEIR_I965);
+
+	clear_register(uncore, EIR);
+	eir = intel_uncore_read(uncore, EIR);
+	if (eir) {
+		/*
+		 * some errors might have become stuck,
+		 * mask them.
+		 */
+		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+		rmw_set(uncore, EMR, eir);
+		intel_uncore_write(uncore, GEN2_IIR,
+				   I915_MASTER_ERROR_INTERRUPT);
+	}
+
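+	/*
+	 * From gen8 onwards a single global fault register covers all
+	 * engines; gen6-7 instead expose a fault register per engine.
+	 */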
+	if (INTEL_GEN(i915) >= 8) {
+		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+	} else if (INTEL_GEN(i915) >= 6) {
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+
+		for_each_engine_masked(engine, i915, engine_mask, id)
+			gen8_clear_engine_error_register(engine);
+	}
+}
+
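+/* Report any pending GPU faults recorded in the per-engine fault registers. */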
+static void gen6_check_faults(struct intel_gt *gt)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 fault;
+
+	for_each_engine(engine, gt->i915, id) {
+		fault = GEN6_RING_FAULT_REG_READ(engine);
+		if (fault & RING_FAULT_VALID) {
+			DRM_DEBUG_DRIVER("Unexpected fault\n"
+					 "\tAddr: 0x%08lx\n"
+					 "\tAddress space: %s\n"
+					 "\tSource ID: %d\n"
+					 "\tType: %d\n",
+					 fault & PAGE_MASK,
+					 fault & RING_FAULT_GTTSEL_MASK ?
+					 "GGTT" : "PPGTT",
+					 RING_FAULT_SRCID(fault),
+					 RING_FAULT_FAULT_TYPE(fault));
+		}
+	}
+}
+
+static void gen8_check_faults(struct intel_gt *gt)
+{
+	struct intel_uncore *uncore = gt->uncore;
+	u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG);
+
+	if (fault & RING_FAULT_VALID) {
+		u32 fault_data0, fault_data1;
+		u64 fault_addr;
+
+		fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0);
+		fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1);
+		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+			     ((u64)fault_data0 << 12);
+
+		DRM_DEBUG_DRIVER("Unexpected fault\n"
+				 "\tAddr: 0x%08x_%08x\n"
+				 "\tAddress space: %s\n"
+				 "\tEngine ID: %d\n"
+				 "\tSource ID: %d\n"
+				 "\tType: %d\n",
+				 upper_32_bits(fault_addr),
+				 lower_32_bits(fault_addr),
+				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+				 GEN8_RING_FAULT_ENGINE_ID(fault),
+				 RING_FAULT_SRCID(fault),
+				 RING_FAULT_FAULT_TYPE(fault));
+	}
+}
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+
+	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
+	if (INTEL_GEN(i915) >= 8)
+		gen8_check_faults(gt);
+	else if (INTEL_GEN(i915) >= 6)
+		gen6_check_faults(gt);
+	else
+		return;
+
+	intel_gt_clear_error_registers(gt, ALL_ENGINES);
+}
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	intel_wakeref_t wakeref;
+
+	/*
+	 * No actual flushing is required for the GTT write domain for reads
+	 * from the GTT domain. Writes to it "immediately" go to main memory
+	 * as far as we know, so there's no chipset flush. It also doesn't
+	 * land in the GPU render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
+	 *
+	 * We also have to wait a bit for the writes to land from the GTT.
+	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+	 * timing. This issue has only been observed when switching quickly
+	 * between GTT writes and CPU reads from inside the kernel on recent hw,
+	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
+	 * system agents we could not reproduce this behaviour; that changed
+	 * with Cannonlake!).
+	 */
+
+	wmb();
+
+	if (INTEL_INFO(i915)->has_coherent_ggtt)
+		return;
+
+	intel_gt_chipset_flush(gt);
+
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+		struct intel_uncore *uncore = gt->uncore;
+
+		spin_lock_irq(&uncore->lock);
+		intel_uncore_posting_read_fw(uncore,
+					     RING_HEAD(RENDER_RING_BASE));
+		spin_unlock_irq(&uncore->lock);
+	}
+}
+
+void intel_gt_chipset_flush(struct intel_gt *gt)
+{
+	wmb();
+	if (INTEL_GEN(gt->i915) < 6)
+		intel_gtt_chipset_flush();
+}
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
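+	/*
+	 * Prefer stolen memory for the scratch buffer; fall back to an
+	 * internal object if no stolen space is available.
+	 */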
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	gt->scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+	i915_vma_unpin_and_release(&gt->scratch, 0);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
new file mode 100644
index 0000000..1093dcf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT__
+#define __INTEL_GT__
+
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+
+struct drm_i915_private;
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void intel_gt_init_hw(struct drm_i915_private *i915);
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt);
+void intel_gt_clear_error_registers(struct intel_gt *gt,
+				    intel_engine_mask_t engine_mask);
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
+void intel_gt_chipset_flush(struct intel_gt *gt);
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
+void intel_gt_fini_scratch(struct intel_gt *gt);
+
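+/* GGTT offset of the given field within the GT scratch buffer. */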
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+					  enum intel_gt_scratch_field field)
+{
+	return i915_ggtt_offset(gt->scratch) + field;
+}
+
+#endif /* __INTEL_GT__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7b59677..36ba80e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -50,9 +51,11 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
 {
-	intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
 }
 
 static int intel_gt_park(struct intel_wakeref *wf)
@@ -75,15 +78,17 @@ static int intel_gt_park(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
 {
-	intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
 }
 
-void intel_gt_pm_init(struct drm_i915_private *i915)
+void intel_gt_pm_init_early(struct intel_gt *gt)
 {
-	intel_wakeref_init(&i915->gt.wakeref);
-	BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+	intel_wakeref_init(&gt->wakeref);
+	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
 }
 
 static bool reset_engines(struct drm_i915_private *i915)
@@ -96,7 +101,7 @@ static bool reset_engines(struct drm_i915_private *i915)
 
 /**
  * intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
  * @force: ignore a failed reset and sanitize engine state anyway
  *
  * Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -104,24 +109,25 @@ static bool reset_engines(struct drm_i915_private *i915)
  * to match. Note that calling intel_gt_sanitize() if the GPU has not
  * been reset results in much confusion!
  */
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	GEM_TRACE("\n");
 
-	if (!reset_engines(i915) && !force)
+	if (!reset_engines(gt->i915) && !force)
 		return;
 
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, gt->i915, id)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	int err = 0;
 
 	/*
 	 * After resume, we may need to poke into the pinned kernel
@@ -129,9 +135,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
 	 * Only the kernel contexts should remain pinned over suspend,
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
-	for_each_engine(engine, i915, id) {
+	intel_gt_pm_get(gt);
+	for_each_engine(engine, gt->i915, id) {
 		struct intel_context *ce;
 
+		intel_engine_pm_get(engine);
+
 		ce = engine->kernel_context;
 		if (ce)
 			ce->ops->reset(ce);
@@ -139,5 +148,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
 		ce = engine->preempt_context;
 		if (ce)
 			ce->ops->reset(ce);
+
+		engine->serial++; /* kernel context lost */
+		err = engine->resume(engine);
+
+		intel_engine_pm_put(engine);
+		if (err) {
+			dev_err(gt->i915->drm.dev,
+				"Failed to restart %s (%d)\n",
+				engine->name, err);
+			break;
+		}
 	}
+	intel_gt_pm_put(gt);
+
+	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 7dd1130..ba960e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,19 +9,19 @@
 
 #include <linux/types.h>
 
-struct drm_i915_private;
+struct intel_gt;
 
 enum {
 	INTEL_GT_UNPARK,
 	INTEL_GT_PARK,
 };
 
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
 
-void intel_gt_pm_init(struct drm_i915_private *i915);
+void intel_gt_pm_init_early(struct intel_gt *gt);
 
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+int intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
new file mode 100644
index 0000000..3563ce9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "i915_vma.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_uncore;
+
+struct intel_gt {
+	struct drm_i915_private *i915;
+	struct intel_uncore *uncore;
+	struct i915_ggtt *ggtt;
+
+	struct intel_gt_timelines {
+		struct mutex mutex; /* protects list */
+		struct list_head active_list;
+
+		/* Pack multiple timelines' seqnos into the same page */
+		spinlock_t hwsp_lock;
+		struct list_head hwsp_free_list;
+	} timelines;
+
+	struct list_head active_rings;
+
+	struct intel_wakeref wakeref;
+
+	struct list_head closed_vma;
+	spinlock_t closed_lock; /* guards the list of closed_vma */
+
+	/**
+	 * Is the GPU currently considered idle, or busy executing
+	 * userspace requests? Whilst idle, we allow runtime power
+	 * management to power down the hardware and display clocks.
+	 * In order to reduce the effect on performance, there
+	 * is a slight delay before we do so.
+	 */
+	intel_wakeref_t awake;
+
+	struct blocking_notifier_head pm_notifications;
+
+	ktime_t last_init_time;
+
+	struct i915_vma *scratch;
+
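+	/* Cached PM interrupt mask/enable register state */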
+	u32 pm_imr;
+	u32 pm_ier;
+};
+
+enum intel_gt_scratch_field {
+	/* 8 bytes */
+	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+	/* 8 bytes */
+	INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
+
+	/* 8 bytes */
+	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+	/* 8 bytes */
+	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+
+};
+
+#endif /* __INTEL_GT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 6bcfa64..797d8ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -57,9 +57,6 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	int slice;
 	int subslice;
 
-	if (engine->id != RCS0)
-		return true;
-
 	intel_engine_get_instdone(engine, &instdone);
 
 	/* There might be unstable subunit states even when
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b42b5f1..270ef417 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -136,11 +136,12 @@
 #include "gem/i915_gem_context.h"
 
 #include "i915_drv.h"
-#include "i915_gem_render_state.h"
 #include "i915_vgpu.h"
 #include "intel_engine_pm.h"
+#include "intel_gt.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
+#include "intel_renderstate.h"
 #include "intel_reset.h"
 #include "intel_workarounds.h"
 
@@ -161,6 +162,8 @@
 #define GEN8_CTX_STATUS_COMPLETED_MASK \
 	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
 
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 #define WA_TAIL_DWORDS 2
@@ -221,6 +224,26 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring);
 
+static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
+{
+	return (i915_ggtt_offset(engine->status_page.vma) +
+		I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static inline void
+ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+	/*
+	 * We inspect HWS_PREEMPT with a semaphore inside
+	 * engine->emit_fini_breadcrumb. If the dword is true,
+	 * the ring is paused as the semaphore will busywait
+	 * until the dword is false.
+	 */
+	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+	if (state)
+		wmb();
+}
+
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
@@ -236,6 +259,17 @@ static int effective_prio(const struct i915_request *rq)
 	int prio = rq_prio(rq);
 
 	/*
+	 * If this request is special and must not be interrupted at any
+	 * cost, so be it. Note we are only checking the most recent request
+	 * in the context and so may be masking an earlier vip request. It
+	 * is hoped that under the conditions where nopreempt is used, this
+	 * will not matter (i.e. all requests to that context will be
+	 * nopreempt for as long as desired).
+	 */
+	if (i915_request_has_nopreempt(rq))
+		prio = I915_PRIORITY_UNPREEMPTABLE;
+
+	/*
 	 * On unwinding the active request, we give it a priority bump
 	 * if it has completed waiting on any semaphore. If we know that
 	 * the request has already started, we can prevent an unwanted
@@ -245,6 +279,7 @@ static int effective_prio(const struct i915_request *rq)
 		prio |= I915_PRIORITY_NOSEMAPHORE;
 
 	/* Restrict mere WAIT boosts from triggering preemption */
+	BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
 	return prio | __NO_PREEMPTION;
 }
 
@@ -271,12 +306,6 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 {
 	int last_prio;
 
-	if (!engine->preempt_context)
-		return false;
-
-	if (i915_request_completed(rq))
-		return false;
-
 	/*
 	 * Check if the current priority hint merits a preemption attempt.
 	 *
@@ -338,9 +367,6 @@ __maybe_unused static inline bool
 assert_priority_queue(const struct i915_request *prev,
 		      const struct i915_request *next)
 {
-	const struct intel_engine_execlists *execlists =
-		&prev->engine->execlists;
-
 	/*
 	 * Without preemption, the prev may refer to the still active element
 	 * which we refuse to let go.
@@ -348,7 +374,7 @@ assert_priority_queue(const struct i915_request *prev,
 	 * Even with preemption, there are times when we think it is better not
 	 * to preempt and leave an ostensibly lower priority request in flight.
 	 */
-	if (port_request(execlists->port) == prev)
+	if (i915_request_is_active(prev))
 		return true;
 
 	return rq_prio(prev) >= rq_prio(next);
@@ -442,13 +468,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		struct intel_engine_cs *owner;
 
 		if (i915_request_completed(rq))
-			break;
+			continue; /* XXX */
 
 		__i915_request_unsubmit(rq);
 		unwind_wa_tail(rq);
 
-		GEM_BUG_ON(rq->hw_context->inflight);
-
 		/*
 		 * Push the request back into the queue for later resubmission.
 		 * If this request is not native to this physical engine (i.e.
@@ -500,32 +524,32 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
 				   status, rq);
 }
 
-inline void
-execlists_user_begin(struct intel_engine_execlists *execlists,
-		     const struct execlist_port *port)
+static inline struct i915_request *
+execlists_schedule_in(struct i915_request *rq, int idx)
 {
-	execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
+	struct intel_context *ce = rq->hw_context;
+	int count;
+
+	trace_i915_request_in(rq, idx);
+
+	count = intel_context_inflight_count(ce);
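+	/*
+	 * Only the first request of a context to go inflight marks the
+	 * context as scheduled-in on this engine; later requests merely
+	 * bump the inflight count.
+	 */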
+	if (!count) {
+		intel_context_get(ce);
+		ce->inflight = rq->engine;
+
+		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+		intel_engine_context_in(ce->inflight);
+	}
+
+	intel_context_inflight_inc(ce);
+	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+
+	return i915_request_get(rq);
 }
 
-inline void
-execlists_user_end(struct intel_engine_execlists *execlists)
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 {
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
-}
-
-static inline void
-execlists_context_schedule_in(struct i915_request *rq)
-{
-	GEM_BUG_ON(rq->hw_context->inflight);
-
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-	intel_engine_context_in(rq->engine);
-	rq->hw_context->inflight = rq->engine;
-}
-
-static void kick_siblings(struct i915_request *rq)
-{
-	struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 	struct i915_request *next = READ_ONCE(ve->request);
 
 	if (next && next->execution_mask & ~rq->execution_mask)
@@ -533,30 +557,43 @@ static void kick_siblings(struct i915_request *rq)
 }
 
 static inline void
-execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
-{
-	rq->hw_context->inflight = NULL;
-	intel_engine_context_out(rq->engine);
-	execlists_context_status_change(rq, status);
-	trace_i915_request_out(rq);
-
-	/*
-	 * If this is part of a virtual engine, its next request may have
-	 * been blocked waiting for access to the active context. We have
-	 * to kick all the siblings again in case we need to switch (e.g.
-	 * the next request is not runnable on this engine). Hopefully,
-	 * we will already have submitted the next request before the
-	 * tasklet runs and do not need to rebuild each virtual tree
-	 * and kick everyone again.
-	 */
-	if (rq->engine != rq->hw_context->engine)
-		kick_siblings(rq);
-}
-
-static u64 execlists_update_context(struct i915_request *rq)
+execlists_schedule_out(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->hw_context;
 
+	GEM_BUG_ON(!intel_context_inflight_count(ce));
+
+	trace_i915_request_out(rq);
+
+	intel_context_inflight_dec(ce);
+	if (!intel_context_inflight_count(ce)) {
+		intel_engine_context_out(ce->inflight);
+		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+
+		/*
+		 * If this is part of a virtual engine, its next request may
+		 * have been blocked waiting for access to the active context.
+		 * We have to kick all the siblings again in case we need to
+		 * switch (e.g. the next request is not runnable on this
+		 * engine). Hopefully, we will already have submitted the next
+		 * request before the tasklet runs and do not need to rebuild
+		 * each virtual tree and kick everyone again.
+		 */
+		ce->inflight = NULL;
+		if (rq->engine != ce->engine)
+			kick_siblings(rq, ce);
+
+		intel_context_put(ce);
+	}
+
+	i915_request_put(rq);
+}
+
+static u64 execlists_update_context(const struct i915_request *rq)
+{
+	struct intel_context *ce = rq->hw_context;
+	u64 desc;
+
 	ce->lrc_reg_state[CTX_RING_TAIL + 1] =
 		intel_ring_set_tail(rq->ring, rq->tail);
 
@@ -576,7 +613,11 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * wmb).
 	 */
 	mb();
-	return ce->lrc_desc;
+
+	desc = ce->lrc_desc;
+	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+
+	return desc;
 }
 
 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
@@ -590,12 +631,62 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
 	}
 }
 
+static __maybe_unused void
+trace_ports(const struct intel_engine_execlists *execlists,
+	    const char *msg,
+	    struct i915_request * const *ports)
+{
+	const struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+
+	GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
+		  engine->name, msg,
+		  ports[0]->fence.context,
+		  ports[0]->fence.seqno,
+		  i915_request_completed(ports[0]) ? "!" :
+		  i915_request_started(ports[0]) ? "*" :
+		  "",
+		  ports[1] ? ports[1]->fence.context : 0,
+		  ports[1] ? ports[1]->fence.seqno : 0);
+}
+
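+/*
+ * Sanity check a pending submission: it must not overflow the ports, must
+ * not submit the same context twice in a row, and every incomplete request
+ * must be backed by an active, pinned context.
+ */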
+static __maybe_unused bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+		     const char *msg)
+{
+	struct i915_request * const *port, *rq;
+	struct intel_context *ce = NULL;
+
+	trace_ports(execlists, msg, execlists->pending);
+
+	if (execlists->pending[execlists_num_ports(execlists)])
+		return false;
+
+	for (port = execlists->pending; (rq = *port); port++) {
+		if (ce == rq->hw_context)
+			return false;
+
+		ce = rq->hw_context;
+		if (i915_request_completed(rq))
+			continue;
+
+		if (i915_active_is_idle(&ce->active))
+			return false;
+
+		if (!i915_vma_is_pinned(ce->state))
+			return false;
+	}
+
+	return ce;
+}
+
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	unsigned int n;
 
+	GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
 	/*
 	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
 	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
@@ -604,7 +695,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	 * that all ELSP are drained i.e. we have processed the CSB,
 	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
 	 */
-	GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
 	/*
 	 * ELSQ note: the submit queue is not cleared after being submitted
@@ -613,38 +704,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	 * of elsq entries, keep this in mind before changing the loop below.
 	 */
 	for (n = execlists_num_ports(execlists); n--; ) {
-		struct i915_request *rq;
-		unsigned int count;
-		u64 desc;
+		struct i915_request *rq = execlists->pending[n];
 
-		rq = port_unpack(&port[n], &count);
-		if (rq) {
-			GEM_BUG_ON(count > !n);
-			if (!count++)
-				execlists_context_schedule_in(rq);
-			port_set(&port[n], port_pack(rq, count));
-			desc = execlists_update_context(rq);
-			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-
-			GEM_TRACE("%s in[%d]:  ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
-				  engine->name, n,
-				  port[n].context_id, count,
-				  rq->fence.context, rq->fence.seqno,
-				  hwsp_seqno(rq),
-				  rq_prio(rq));
-		} else {
-			GEM_BUG_ON(!n);
-			desc = 0;
-		}
-
-		write_desc(execlists, desc, n);
+		write_desc(execlists,
+			   rq ? execlists_update_context(rq) : 0,
+			   n);
 	}
 
 	/* we need to manually load the submit queue */
 	if (execlists->ctrl_reg)
 		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
 }
 
 static bool ctx_single_port_submission(const struct intel_context *ce)
@@ -668,6 +737,7 @@ static bool can_merge_ctx(const struct intel_context *prev,
 static bool can_merge_rq(const struct i915_request *prev,
 			 const struct i915_request *next)
 {
+	GEM_BUG_ON(prev == next);
 	GEM_BUG_ON(!assert_priority_queue(prev, next));
 
 	if (!can_merge_ctx(prev->hw_context, next->hw_context))
@@ -676,58 +746,6 @@ static bool can_merge_rq(const struct i915_request *prev,
 	return true;
 }
 
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
-	GEM_BUG_ON(rq == port_request(port));
-
-	if (port_isset(port))
-		i915_request_put(port_request(port));
-
-	port_set(port, port_pack(i915_request_get(rq), port_count(port)));
-}
-
-static void inject_preempt_context(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	struct intel_context *ce = engine->preempt_context;
-	unsigned int n;
-
-	GEM_BUG_ON(execlists->preempt_complete_status !=
-		   upper_32_bits(ce->lrc_desc));
-
-	/*
-	 * Switch to our empty preempt context so
-	 * the state of the GPU is known (idle).
-	 */
-	GEM_TRACE("%s\n", engine->name);
-	for (n = execlists_num_ports(execlists); --n; )
-		write_desc(execlists, 0, n);
-
-	write_desc(execlists, ce->lrc_desc, n);
-
-	/* we need to manually load the submit queue */
-	if (execlists->ctrl_reg)
-		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
-	execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-
-	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
-}
-
-static void complete_preempt_context(struct intel_engine_execlists *execlists)
-{
-	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
-	if (inject_preempt_hang(execlists))
-		return;
-
-	execlists_cancel_port_requests(execlists);
-	__unwind_incomplete_requests(container_of(execlists,
-						  struct intel_engine_cs,
-						  execlists));
-}
-
 static void virtual_update_register_offsets(u32 *regs,
 					    struct intel_engine_cs *engine)
 {
@@ -792,7 +810,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	 * we reuse the register offsets). This is a very small
 	 * hysteresis on the greedy selection algorithm.
 	 */
-	inflight = READ_ONCE(ve->context.inflight);
+	inflight = intel_context_inflight(&ve->context);
 	if (inflight && inflight != engine)
 		return false;
 
@@ -815,13 +833,105 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+	struct i915_request * const *last = execlists->active;
+
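+	/* Skip over completed entries to find the request still executing on HW, if any */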
+	while (*last && i915_request_completed(*last))
+		last++;
+
+	return *last;
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+	LIST_HEAD(list);
+
+	/*
+	 * We want to move the interrupted request to the back of
+	 * the round-robin list (i.e. its priority level), but
+	 * in doing so, we must then move all requests that were in
+	 * flight and were waiting for the interrupted request to
+	 * be run after it again.
+	 */
+	do {
+		struct i915_dependency *p;
+
+		GEM_BUG_ON(i915_request_is_active(rq));
+		list_move_tail(&rq->sched.link, pl);
+
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
+
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_started(w) &&
+				   !i915_request_completed(rq));
+
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (list_empty(&w->sched.link))
+				continue; /* Not yet submitted; unready */
+
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
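+/*
+ * Timeslice expired: move the active context (and its same-engine waiters)
+ * to the back of its priority list so that the next context gets a turn.
+ */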
+static void defer_active(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = __unwind_incomplete_requests(engine);
+	if (!rq)
+		return;
+
+	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
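+/*
+ * A timeslice is needed if a request of equal or higher priority is queued
+ * behind the currently active request.
+ */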
+static bool
+need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	int hint;
+
+	if (list_is_last(&rq->sched.link, &engine->active.requests))
+		return false;
+
+	hint = max(rq_prio(list_next_entry(rq, sched.link)),
+		   engine->execlists.queue_priority_hint);
+
+	return hint >= effective_prio(rq);
+}
+
+static bool
+enable_timeslice(struct intel_engine_cs *engine)
+{
+	struct i915_request *last = last_active(&engine->execlists);
+
+	return last && need_timeslice(engine, last);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	const struct execlist_port * const last_port =
-		&execlists->port[execlists->port_mask];
-	struct i915_request *last = port_request(port);
+	struct i915_request **port = execlists->pending;
+	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -867,65 +977,100 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		break;
 	}
 
+	/*
+	 * If the queue is higher priority than the last
+	 * request in the currently active context, submit afresh.
+	 * We will resubmit again afterwards in case we need to split
+	 * the active context to interject the preemption request,
+	 * i.e. we will retrigger preemption following the ack in case
+	 * of trouble.
+	 */
+	last = last_active(execlists);
 	if (last) {
-		/*
-		 * Don't resubmit or switch until all outstanding
-		 * preemptions (lite-restore) are seen. Then we
-		 * know the next preemption status we see corresponds
-		 * to this ELSP update.
-		 */
-		GEM_BUG_ON(!execlists_is_active(execlists,
-						EXECLISTS_ACTIVE_USER));
-		GEM_BUG_ON(!port_count(&port[0]));
-
-		/*
-		 * If we write to ELSP a second time before the HW has had
-		 * a chance to respond to the previous write, we can confuse
-		 * the HW and hit "undefined behaviour". After writing to ELSP,
-		 * we must then wait until we see a context-switch event from
-		 * the HW to indicate that it has had a chance to respond.
-		 */
-		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
-			return;
-
 		if (need_preempt(engine, last, rb)) {
-			inject_preempt_context(engine);
-			return;
+			GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
+				  engine->name,
+				  last->fence.context,
+				  last->fence.seqno,
+				  last->sched.attr.priority,
+				  execlists->queue_priority_hint);
+			record_preemption(execlists);
+
+			/*
+			 * Don't let the RING_HEAD advance past the breadcrumb
+			 * as we unwind (and until we resubmit) so that we do
+			 * not accidentally tell it to go backwards.
+			 */
+			ring_set_paused(engine, 1);
+
+			/*
+			 * Note that we have not stopped the GPU at this point,
+			 * so we are unwinding the incomplete requests as they
+			 * remain inflight and so by the time we do complete
+			 * the preemption, some of the unwound requests may
+			 * complete!
+			 */
+			__unwind_incomplete_requests(engine);
+
+			/*
+			 * If we need to return to the preempted context, we
+			 * need to skip the lite-restore and force it to
+			 * reload the RING_TAIL. Otherwise, the HW has a
+			 * tendency to ignore us rewinding the TAIL to the
+			 * end of an earlier request.
+			 */
+			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+			last = NULL;
+		} else if (need_timeslice(engine, last) &&
+			   !timer_pending(&engine->execlists.timer)) {
+			GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
+				  engine->name,
+				  last->fence.context,
+				  last->fence.seqno,
+				  last->sched.attr.priority,
+				  execlists->queue_priority_hint);
+
+			ring_set_paused(engine, 1);
+			defer_active(engine);
+
+			/*
+			 * Unlike for preemption, if we rewind and continue
+			 * executing the same context as previously active,
+			 * the order of execution will remain the same and
+			 * the tail will only advance. We do not need to
+			 * force a full context restore, as a lite-restore
+			 * is sufficient to resample the monotonic TAIL.
+			 *
+			 * If we switch to any other context, similarly we
+			 * will not rewind the TAIL of the current context, and
+			 * normal save/restore will preserve state and allow
+			 * us to later continue executing the same request.
+			 */
+			last = NULL;
+		} else {
+			/*
+			 * Otherwise if we already have a request pending
+			 * for execution after the current one, we can
+			 * just wait until the next CS event before
+			 * queuing more. In either case we will force a
+			 * lite-restore preemption event, but if we wait
+			 * we hopefully coalesce several updates into a single
+			 * submission.
+			 */
+			if (!list_is_last(&last->sched.link,
+					  &engine->active.requests))
+				return;
+
+			/*
+			 * WaIdleLiteRestore:bdw,skl
+			 * Apply the wa NOOPs to prevent
+			 * ring:HEAD == rq:TAIL as we resubmit the
+			 * request. See gen8_emit_fini_breadcrumb() for
+			 * where we prepare the padding after the
+			 * end of the request.
+			 */
+			last->tail = last->wa_tail;
 		}
-
-		/*
-		 * In theory, we could coalesce more requests onto
-		 * the second port (the first port is active, with
-		 * no preemptions pending). However, that means we
-		 * then have to deal with the possible lite-restore
-		 * of the second port (as we submit the ELSP, there
-		 * may be a context-switch) but also we may complete
-		 * the resubmission before the context-switch. Ergo,
-		 * coalescing onto the second port will cause a
-		 * preemption event, but we cannot predict whether
-		 * that will affect port[0] or port[1].
-		 *
-		 * If the second port is already active, we can wait
-		 * until the next context-switch before contemplating
-		 * new requests. The GPU will be busy and we should be
-		 * able to resubmit the new ELSP before it idles,
-		 * avoiding pipeline bubbles (momentary pauses where
-		 * the driver is unable to keep up the supply of new
-		 * work). However, we have to double check that the
-		 * priorities of the ports haven't been switch.
-		 */
-		if (port_count(&port[1]))
-			return;
-
-		/*
-		 * WaIdleLiteRestore:bdw,skl
-		 * Apply the wa NOOPs to prevent
-		 * ring:HEAD == rq:TAIL as we resubmit the
-		 * request. See gen8_emit_fini_breadcrumb() for
-		 * where we prepare the padding after the
-		 * end of the request.
-		 */
-		last->tail = last->wa_tail;
 	}
 
 	while (rb) { /* XXX virtual is always taking precedence */
@@ -955,9 +1100,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				continue;
 			}
 
+			if (i915_request_completed(rq)) {
+				ve->request = NULL;
+				ve->base.execlists.queue_priority_hint = INT_MIN;
+				rb_erase_cached(rb, &execlists->virtual);
+				RB_CLEAR_NODE(rb);
+
+				rq->engine = engine;
+				__i915_request_submit(rq);
+
+				spin_unlock(&ve->base.active.lock);
+
+				rb = rb_first_cached(&execlists->virtual);
+				continue;
+			}
+
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				return; /* leave this rq for another engine */
+				return; /* leave this for another */
 			}
 
 			GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
@@ -1006,9 +1166,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			}
 
 			__i915_request_submit(rq);
-			trace_i915_request_in(rq, port_index(port, execlists));
-			submit = true;
-			last = rq;
+			if (!i915_request_completed(rq)) {
+				submit = true;
+				last = rq;
+			}
 		}
 
 		spin_unlock(&ve->base.active.lock);
@@ -1021,6 +1182,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
+			if (i915_request_completed(rq))
+				goto skip;
+
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -1060,19 +1224,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				    ctx_single_port_submission(rq->hw_context))
 					goto done;
 
-
-				if (submit)
-					port_assign(port, last);
+				*port = execlists_schedule_in(last, port - execlists->pending);
 				port++;
-
-				GEM_BUG_ON(port_isset(port));
 			}
 
-			__i915_request_submit(rq);
-			trace_i915_request_in(rq, port_index(port, execlists));
-
 			last = rq;
 			submit = true;
+skip:
+			__i915_request_submit(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -1097,54 +1256,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * interrupt for secondary ports).
 	 */
 	execlists->queue_priority_hint = queue_prio(execlists);
+	GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
+		  engine->name, execlists->queue_priority_hint,
+		  yesno(submit));
 
 	if (submit) {
-		port_assign(port, last);
+		*port = execlists_schedule_in(last, port - execlists->pending);
+		memset(port + 1, 0, (last_port - port) * sizeof(*port));
 		execlists_submit_ports(engine);
+	} else {
+		ring_set_paused(engine, 0);
 	}
-
-	/* We must always keep the beast fed if we have work piled up */
-	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
-		   !port_isset(execlists->port));
-
-	/* Re-evaluate the executing context setup after each preemptive kick */
-	if (last)
-		execlists_user_begin(execlists, execlists->port);
-
-	/* If the engine is now idle, so should be the flag; and vice versa. */
-	GEM_BUG_ON(execlists_is_active(&engine->execlists,
-				       EXECLISTS_ACTIVE_USER) ==
-		   !port_isset(engine->execlists.port));
 }
 
 void
 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 {
-	struct execlist_port *port = execlists->port;
-	unsigned int num_ports = execlists_num_ports(execlists);
+	struct i915_request * const *port, *rq;
 
-	while (num_ports-- && port_isset(port)) {
-		struct i915_request *rq = port_request(port);
+	for (port = execlists->pending; (rq = *port); port++)
+		execlists_schedule_out(rq);
+	memset(execlists->pending, 0, sizeof(execlists->pending));
 
-		GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
-			  rq->engine->name,
-			  (unsigned int)(port - execlists->port),
-			  rq->fence.context, rq->fence.seqno,
-			  hwsp_seqno(rq));
-
-		GEM_BUG_ON(!execlists->active);
-		execlists_context_schedule_out(rq,
-					       i915_request_completed(rq) ?
-					       INTEL_CONTEXT_SCHEDULE_OUT :
-					       INTEL_CONTEXT_SCHEDULE_PREEMPTED);
-
-		i915_request_put(rq);
-
-		memset(port, 0, sizeof(*port));
-		port++;
-	}
-
-	execlists_clear_all_active(execlists);
+	for (port = execlists->active; (rq = *port); port++)
+		execlists_schedule_out(rq);
+	execlists->active =
+		memset(execlists->inflight, 0, sizeof(execlists->inflight));
 }
 
 static inline void
@@ -1160,10 +1297,33 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
 	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
 }
 
+enum csb_step {
+	CSB_NOP,
+	CSB_PROMOTE,
+	CSB_PREEMPT,
+	CSB_COMPLETE,
+};
+
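+/*
+ * Decode a single CSB event: an idle->active event promotes the pending
+ * submission to inflight, a preemption event cancels the current inflight
+ * set, and any other event while a context remains inflight indicates a
+ * completion.
+ */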
+static inline enum csb_step
+csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+	unsigned int status = *csb;
+
+	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+		return CSB_PROMOTE;
+
+	if (status & GEN8_CTX_STATUS_PREEMPTED)
+		return CSB_PREEMPT;
+
+	if (*execlists->active)
+		return CSB_COMPLETE;
+
+	return CSB_NOP;
+}
+
 static void process_csb(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	const u32 * const buf = execlists->csb_status;
 	const u8 num_entries = execlists->csb_size;
 	u8 head, tail;
@@ -1198,10 +1358,6 @@ static void process_csb(struct intel_engine_cs *engine)
 	rmb();
 
 	do {
-		struct i915_request *rq;
-		unsigned int status;
-		unsigned int count;
-
 		if (++head == num_entries)
 			head = 0;
 
@@ -1223,68 +1379,39 @@ static void process_csb(struct intel_engine_cs *engine)
 		 * status notifier.
 		 */
 
-		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
 			  engine->name, head,
-			  buf[2 * head + 0], buf[2 * head + 1],
-			  execlists->active);
+			  buf[2 * head + 0], buf[2 * head + 1]);
 
-		status = buf[2 * head];
-		if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
-			      GEN8_CTX_STATUS_PREEMPTED))
-			execlists_set_active(execlists,
-					     EXECLISTS_ACTIVE_HWACK);
-		if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
-			execlists_clear_active(execlists,
-					       EXECLISTS_ACTIVE_HWACK);
+		switch (csb_parse(execlists, buf + 2 * head)) {
+		case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+			trace_ports(execlists, "preempted", execlists->active);
 
-		if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
-			continue;
+			while (*execlists->active)
+				execlists_schedule_out(*execlists->active++);
 
-		/* We should never get a COMPLETED | IDLE_ACTIVE! */
-		GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+			/* fallthrough */
+		case CSB_PROMOTE: /* switch pending to inflight */
+			GEM_BUG_ON(*execlists->active);
+			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+			execlists->active =
+				memcpy(execlists->inflight,
+				       execlists->pending,
+				       execlists_num_ports(execlists) *
+				       sizeof(*execlists->pending));
+			execlists->pending[0] = NULL;
 
-		if (status & GEN8_CTX_STATUS_COMPLETE &&
-		    buf[2*head + 1] == execlists->preempt_complete_status) {
-			GEM_TRACE("%s preempt-idle\n", engine->name);
-			complete_preempt_context(execlists);
-			continue;
-		}
+			trace_ports(execlists, "promoted", execlists->active);
 
-		if (status & GEN8_CTX_STATUS_PREEMPTED &&
-		    execlists_is_active(execlists,
-					EXECLISTS_ACTIVE_PREEMPT))
-			continue;
+			if (enable_timeslice(engine))
+				mod_timer(&execlists->timer, jiffies + 1);
 
-		GEM_BUG_ON(!execlists_is_active(execlists,
-						EXECLISTS_ACTIVE_USER));
+			if (!inject_preempt_hang(execlists))
+				ring_set_paused(engine, 0);
+			break;
 
-		rq = port_unpack(port, &count);
-		GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
-			  engine->name,
-			  port->context_id, count,
-			  rq ? rq->fence.context : 0,
-			  rq ? rq->fence.seqno : 0,
-			  rq ? hwsp_seqno(rq) : 0,
-			  rq ? rq_prio(rq) : 0);
-
-		/* Check the context/desc id for this event matches */
-		GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
-
-		GEM_BUG_ON(count == 0);
-		if (--count == 0) {
-			/*
-			 * On the final event corresponding to the
-			 * submission of this context, we expect either
-			 * an element-switch event or a completion
-			 * event (and on completion, the active-idle
-			 * marker). No more preemptions, lite-restore
-			 * or otherwise.
-			 */
-			GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
-			GEM_BUG_ON(port_isset(&port[1]) &&
-				   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
-			GEM_BUG_ON(!port_isset(&port[1]) &&
-				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+		case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+			trace_ports(execlists, "completed", execlists->active);
 
 			/*
 			 * We rely on the hardware being strongly
@@ -1292,22 +1419,15 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * coherent (visible from the CPU) before the
 			 * user interrupt and CSB is processed.
 			 */
-			GEM_BUG_ON(!i915_request_completed(rq));
+			GEM_BUG_ON(!i915_request_completed(*execlists->active));
+			execlists_schedule_out(*execlists->active++);
 
-			execlists_context_schedule_out(rq,
-						       INTEL_CONTEXT_SCHEDULE_OUT);
-			i915_request_put(rq);
+			GEM_BUG_ON(execlists->active - execlists->inflight >
+				   execlists_num_ports(execlists));
+			break;
 
-			GEM_TRACE("%s completed ctx=%d\n",
-				  engine->name, port->context_id);
-
-			port = execlists_port_complete(execlists, port);
-			if (port_isset(port))
-				execlists_user_begin(execlists, port);
-			else
-				execlists_user_end(execlists);
-		} else {
-			port_set(port, port_pack(rq, count));
+		case CSB_NOP:
+			break;
 		}
 	} while (head != tail);
 
@@ -1332,7 +1452,7 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 	lockdep_assert_held(&engine->active.lock);
 
 	process_csb(engine);
-	if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+	if (!engine->execlists.pending[0])
 		execlists_dequeue(engine);
 }
 
@@ -1345,16 +1465,20 @@ static void execlists_submission_tasklet(unsigned long data)
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	unsigned long flags;
 
-	GEM_TRACE("%s awake?=%d, active=%x\n",
-		  engine->name,
-		  !!intel_wakeref_active(&engine->wakeref),
-		  engine->execlists.active);
-
 	spin_lock_irqsave(&engine->active.lock, flags);
 	__execlists_submission_tasklet(engine);
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
+static void execlists_submission_timer(struct timer_list *timer)
+{
+	struct intel_engine_cs *engine =
+		from_timer(engine, timer, execlists.timer);
+
+	/* Kick the tasklet for some interrupt coalescing and reset handling */
+	tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
 static void queue_request(struct intel_engine_cs *engine,
 			  struct i915_sched_node *node,
 			  int prio)
@@ -1376,12 +1500,16 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 		tasklet_hi_schedule(&execlists->tasklet);
 }
 
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static void submit_queue(struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
 {
-	if (prio > engine->execlists.queue_priority_hint) {
-		engine->execlists.queue_priority_hint = prio;
-		__submit_queue_imm(engine);
-	}
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
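+	/* Only kick the tasklet if this request raises the queue priority hint */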
+	if (rq_prio(rq) <= execlists->queue_priority_hint)
+		return;
+
+	execlists->queue_priority_hint = rq_prio(rq);
+	__submit_queue_imm(engine);
 }
 
 static void execlists_submit_request(struct i915_request *request)
@@ -1397,7 +1525,7 @@ static void execlists_submit_request(struct i915_request *request)
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
 
-	submit_queue(engine, rq_prio(request));
+	submit_queue(engine, request);
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -1405,15 +1533,14 @@ static void execlists_submit_request(struct i915_request *request)
 static void __execlists_context_fini(struct intel_context *ce)
 {
 	intel_ring_put(ce->ring);
-
-	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
-	i915_gem_object_put(ce->state->obj);
+	i915_vma_put(ce->state);
 }
 
 static void execlists_context_destroy(struct kref *kref)
 {
 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
 
+	GEM_BUG_ON(!i915_active_is_idle(&ce->active));
 	GEM_BUG_ON(intel_context_is_pinned(ce));
 
 	if (ce->state)
@@ -1426,7 +1553,6 @@ static void execlists_context_unpin(struct intel_context *ce)
 {
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 	i915_gem_object_unpin_map(ce->state->obj);
-	intel_ring_unpin(ce->ring);
 }
 
 static void
@@ -1463,12 +1589,10 @@ __execlists_context_pin(struct intel_context *ce,
 		goto err;
 	GEM_BUG_ON(!ce->state);
 
-	ret = intel_context_active_acquire(ce,
-					   engine->i915->ggtt.pin_bias |
-					   PIN_OFFSET_BIAS |
-					   PIN_HIGH);
+	ret = intel_context_active_acquire(ce);
 	if (ret)
 		goto err;
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
 					i915_coherent_map_type(engine->i915) |
@@ -1478,13 +1602,9 @@ __execlists_context_pin(struct intel_context *ce,
 		goto unpin_active;
 	}
 
-	ret = intel_ring_pin(ce->ring);
-	if (ret)
-		goto unpin_map;
-
 	ret = i915_gem_context_pin_hw_id(ce->gem_context);
 	if (ret)
-		goto unpin_ring;
+		goto unpin_map;
 
 	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1612,6 @@ __execlists_context_pin(struct intel_context *ce,
 
 	return 0;
 
-unpin_ring:
-	intel_ring_unpin(ce->ring);
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
 unpin_active:
@@ -1682,7 +1800,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 	/* NB no one else is allowed to scribble over scratch + 256! */
 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_scratch_offset(engine->i915) + 256;
+	*batch++ = intel_gt_scratch_offset(engine->gt,
+					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
 	*batch++ = 0;
 
 	*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1696,12 +1815,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_scratch_offset(engine->i915) + 256;
+	*batch++ = intel_gt_scratch_offset(engine->gt,
+					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
 	*batch++ = 0;
 
 	return batch;
 }
 
+static u32 slm_offset(struct intel_engine_cs *engine)
+{
+	return intel_gt_scratch_offset(engine->gt,
+				       INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
+}
+
 /*
  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
  * initialized at the beginning and shared across all contexts but this field
@@ -1733,8 +1859,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 				       PIPE_CONTROL_GLOBAL_GTT_IVB |
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_QW_WRITE,
-				       i915_scratch_offset(engine->i915) +
-				       2 * CACHELINE_BYTES);
+				       slm_offset(engine));
 
 	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 
@@ -1880,7 +2005,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
@@ -1976,22 +2101,23 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 
 static void enable_execlists(struct intel_engine_cs *engine)
 {
+	u32 mode;
+
+	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
 	if (INTEL_GEN(engine->i915) >= 11)
-		ENGINE_WRITE(engine,
-			     RING_MODE_GEN7,
-			     _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
 	else
-		ENGINE_WRITE(engine,
-			     RING_MODE_GEN7,
-			     _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+	ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
 
-	ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 
-	ENGINE_WRITE(engine,
-		     RING_HWS_PGA,
-		     i915_ggtt_offset(engine->status_page.vma));
+	ENGINE_WRITE_FW(engine,
+			RING_HWS_PGA,
+			i915_ggtt_offset(engine->status_page.vma));
 	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
 }
 
@@ -1999,7 +2125,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 {
 	bool unexpected = false;
 
-	if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
+	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
 		DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
 		unexpected = true;
 	}
@@ -2054,27 +2180,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
-static bool lrc_regs_ok(const struct i915_request *rq)
+static void reset_csb_pointers(struct intel_engine_cs *engine)
 {
-	const struct intel_ring *ring = rq->ring;
-	const u32 *regs = rq->hw_context->lrc_reg_state;
-
-	/* Quick spot check for the common signs of context corruption */
-
-	if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
-	    (RING_CTL_SIZE(ring->size) | RING_VALID))
-		return false;
-
-	if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
-		return false;
-
-	return true;
-}
-
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
-{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	const unsigned int reset_value = execlists->csb_size - 1;
 
+	ring_set_paused(engine, 0);
+
 	/*
 	 * After a reset, the HW starts writing into CSB entry [0]. We
 	 * therefore have to set our HEAD pointer back one entry so that
@@ -2121,18 +2233,21 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	process_csb(engine); /* drain preemption events */
 
 	/* Following the reset, we need to reload the CSB read/write pointers */
-	reset_csb_pointers(&engine->execlists);
+	reset_csb_pointers(engine);
 
 	/*
 	 * Save the currently executing context, even if we completed
 	 * its request, it was still running at the time of the
 	 * reset and will have been clobbered.
 	 */
-	if (!port_isset(execlists->port))
-		goto out_clear;
+	rq = execlists_active(execlists);
+	if (!rq)
+		return;
 
-	rq = port_request(execlists->port);
 	ce = rq->hw_context;
+	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+	rq = active_request(rq);
 
 	/*
 	 * Catch up with any missed context-switch interrupts.
@@ -2145,9 +2260,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 */
 	execlists_cancel_port_requests(execlists);
 
-	rq = active_request(rq);
-	if (!rq)
+	if (!rq) {
+		ce->ring->head = ce->ring->tail;
 		goto out_replay;
+	}
+
+	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
 
 	/*
 	 * If this request hasn't started yet, e.g. it is waiting on a
@@ -2161,7 +2279,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * Otherwise, if we have not started yet, the request should replay
 	 * perfectly and we do not need to flag the result as being erroneous.
 	 */
-	if (!i915_request_started(rq) && lrc_regs_ok(rq))
+	if (!i915_request_started(rq))
 		goto out_replay;
 
 	/*
@@ -2176,7 +2294,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * image back to the expected values to skip over the guilty request.
 	 */
 	i915_reset_request(rq, stalled);
-	if (!stalled && lrc_regs_ok(rq))
+	if (!stalled)
 		goto out_replay;
 
 	/*
@@ -2196,17 +2314,13 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	execlists_init_reg_state(regs, ce, engine, ce->ring);
 
 out_replay:
-	/* Rerun the request; its payload has been neutered (if guilty). */
-	ce->ring->head =
-		rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+	GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+		  engine->name, ce->ring->head, ce->ring->tail);
 	intel_ring_update_space(ce->ring);
 	__execlists_update_reg_state(ce, engine);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	__unwind_incomplete_requests(engine);
-
-out_clear:
-	execlists_clear_all_active(execlists);
 }
 
 static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2302,7 +2416,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	execlists->queue_priority_hint = INT_MIN;
 	execlists->queue = RB_ROOT_CACHED;
-	GEM_BUG_ON(port_isset(execlists->port));
 
 	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
 	execlists->tasklet.func = nop_submission_tasklet;
@@ -2440,7 +2553,8 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
-		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
+		intel_gt_scratch_offset(engine->gt,
+					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
 	int len;
@@ -2520,15 +2634,29 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 	return cs;
 }
 
+static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = intel_hws_preempt_address(request->engine);
+	*cs++ = 0;
+
+	return cs;
+}
+
 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
 {
 	cs = gen8_emit_ggtt_write(cs,
 				  request->fence.seqno,
 				  request->timeline->hwsp_offset,
 				  0);
-
 	*cs++ = MI_USER_INTERRUPT;
+
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	cs = emit_preempt_busywait(request, cs);
 
 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);
@@ -2549,9 +2677,10 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 				    PIPE_CONTROL_FLUSH_ENABLE |
 				    PIPE_CONTROL_CS_STALL,
 				    0);
-
 	*cs++ = MI_USER_INTERRUPT;
+
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	cs = emit_preempt_busywait(request, cs);
 
 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);
@@ -2575,11 +2704,12 @@ static int gen8_init_rcs_context(struct i915_request *rq)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return i915_gem_render_state_emit(rq);
+	return intel_renderstate_emit(rq);
 }
 
 static void execlists_park(struct intel_engine_cs *engine)
 {
+	del_timer_sync(&engine->execlists.timer);
 	intel_engine_park(engine);
 }
 
@@ -2600,8 +2730,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	if (!intel_vgpu_active(engine->i915))
 		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-	if (engine->preempt_context &&
-	    HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+	if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
 		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 }
 
@@ -2678,6 +2807,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 
 	tasklet_init(&engine->execlists.tasklet,
 		     execlists_submission_tasklet, (unsigned long)engine);
+	timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
 
 	logical_ring_default_vfuncs(engine);
 	logical_ring_default_irqs(engine);
@@ -2703,9 +2833,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	intel_engine_init_workarounds(engine);
-	intel_engine_init_whitelist(engine);
-
 	if (intel_init_workaround_bb(engine))
 		/*
 		 * We continue even if we fail to initialize WA batch
@@ -2724,11 +2851,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
 			i915_mmio_reg_offset(RING_ELSP(base));
 	}
 
-	execlists->preempt_complete_status = ~0u;
-	if (engine->preempt_context)
-		execlists->preempt_complete_status =
-			upper_32_bits(engine->preempt_context->lrc_desc);
-
 	execlists->csb_status =
 		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
 
@@ -2740,7 +2862,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
 	else
 		execlists->csb_size = GEN11_CSB_ENTRIES;
 
-	reset_csb_pointers(execlists);
+	reset_csb_pointers(engine);
 
 	return 0;
 }
@@ -2923,11 +3045,6 @@ populate_lr_context(struct intel_context *ce,
 	if (!engine->default_state)
 		regs[CTX_CONTEXT_CONTROL + 1] |=
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
-	if (ce->gem_context == engine->i915->preempt_context &&
-	    INTEL_GEN(engine->i915) < 11)
-		regs[CTX_CONTEXT_CONTROL + 1] |=
-			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
 
 	ret = 0;
 err_unpin_ctx:
@@ -2938,12 +3055,13 @@ populate_lr_context(struct intel_context *ce,
 	return ret;
 }
 
-static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+static struct intel_timeline *
+get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
 {
 	if (ctx->timeline)
-		return i915_timeline_get(ctx->timeline);
+		return intel_timeline_get(ctx->timeline);
 	else
-		return i915_timeline_create(ctx->i915, NULL);
+		return intel_timeline_create(gt, NULL);
 }
 
 static int execlists_context_deferred_alloc(struct intel_context *ce,
@@ -2953,7 +3071,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
 	struct i915_vma *vma;
 	u32 context_size;
 	struct intel_ring *ring;
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 	int ret;
 
 	if (ce->state)
@@ -2971,13 +3089,13 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
 	if (IS_ERR(ctx_obj))
 		return PTR_ERR(ctx_obj);
 
-	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto error_deref_obj;
 	}
 
-	timeline = get_timeline(ce->gem_context);
+	timeline = get_timeline(ce->gem_context, engine->gt);
 	if (IS_ERR(timeline)) {
 		ret = PTR_ERR(timeline);
 		goto error_deref_obj;
@@ -2986,7 +3104,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
 	ring = intel_engine_create_ring(engine,
 					timeline,
 					ce->gem_context->ring_size);
-	i915_timeline_put(timeline);
+	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
@@ -3296,6 +3414,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		return ERR_PTR(-ENOMEM);
 
 	ve->base.i915 = ctx->i915;
+	ve->base.gt = siblings[0]->gt;
 	ve->base.id = -1;
 	ve->base.class = OTHER_CLASS;
 	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 1f9db50..290a5e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@
 #include "i915_drv.h"
 
 #include "intel_engine.h"
+#include "intel_gt.h"
 #include "intel_mocs.h"
 #include "intel_lrc.h"
 
@@ -247,7 +248,7 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
 
 /**
  * get_mocs_settings()
- * @dev_priv:	i915 device.
+ * @gt:		the intel_gt container
  * @table:      Output table that will be made to point at appropriate
  *	      MOCS values for the device.
  *
@@ -257,33 +258,34 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
  *
  * Return: true if there are applicable MOCS settings for the device.
  */
-static bool get_mocs_settings(struct drm_i915_private *dev_priv,
+static bool get_mocs_settings(struct intel_gt *gt,
 			      struct drm_i915_mocs_table *table)
 {
+	struct drm_i915_private *i915 = gt->i915;
 	bool result = false;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		table->size  = ARRAY_SIZE(icelake_mocs_table);
 		table->table = icelake_mocs_table;
 		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
 		result = true;
-	} else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
 		table->size  = ARRAY_SIZE(skylake_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 		table->table = skylake_mocs_table;
 		result = true;
-	} else if (IS_GEN9_LP(dev_priv)) {
+	} else if (IS_GEN9_LP(i915)) {
 		table->size  = ARRAY_SIZE(broxton_mocs_table);
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 		table->table = broxton_mocs_table;
 		result = true;
 	} else {
-		WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
+		WARN_ONCE(INTEL_GEN(i915) >= 9,
 			  "Platform that should have a MOCS table does not.\n");
 	}
 
 	/* WaDisableSkipCaching:skl,bxt,kbl,glk */
-	if (IS_GEN(dev_priv, 9)) {
+	if (IS_GEN(i915, 9)) {
 		int i;
 
 		for (i = 0; i < table->size; i++)
@@ -338,12 +340,16 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
  */
 void intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_gt *gt = engine->gt;
+	struct intel_uncore *uncore = gt->uncore;
 	struct drm_i915_mocs_table table;
 	unsigned int index;
 	u32 unused_value;
 
-	if (!get_mocs_settings(dev_priv, &table))
+	/* Called under a blanket forcewake */
+	assert_forcewakes_active(uncore, FORCEWAKE_ALL);
+
+	if (!get_mocs_settings(gt, &table))
 		return;
 
 	/* Set unused values to PTE */
@@ -352,12 +358,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
 	for (index = 0; index < table.size; index++) {
 		u32 value = get_entry_control(&table, index);
 
-		I915_WRITE(mocs_register(engine->id, index), value);
+		intel_uncore_write_fw(uncore,
+				      mocs_register(engine->id, index),
+				      value);
 	}
 
 	/* All remaining entries are also unused */
 	for (; index < table.n_entries; index++)
-		I915_WRITE(mocs_register(engine->id, index), unused_value);
+		intel_uncore_write_fw(uncore,
+				      mocs_register(engine->id, index),
+				      unused_value);
 }
 
 /**
@@ -490,7 +500,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
 
 /**
  * intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev_priv:      i915 device private
+ * @gt: the intel_gt container
  *
  * This function simply programs the mocs registers for the given table
  * starting at the given address. This register set is  programmed in pairs.
@@ -502,13 +512,14 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
  *
  * Return: Nothing.
  */
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
+void intel_mocs_init_l3cc_table(struct intel_gt *gt)
 {
+	struct intel_uncore *uncore = gt->uncore;
 	struct drm_i915_mocs_table table;
 	unsigned int i;
 	u16 unused_value;
 
-	if (!get_mocs_settings(dev_priv, &table))
+	if (!get_mocs_settings(gt, &table))
 		return;
 
 	/* Set unused values to PTE */
@@ -518,23 +529,27 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
 		u16 low = get_entry_l3cc(&table, 2 * i);
 		u16 high = get_entry_l3cc(&table, 2 * i + 1);
 
-		I915_WRITE(GEN9_LNCFCMOCS(i),
-			   l3cc_combine(&table, low, high));
+		intel_uncore_write(uncore,
+				   GEN9_LNCFCMOCS(i),
+				   l3cc_combine(&table, low, high));
 	}
 
 	/* Odd table size - 1 left over */
 	if (table.size & 0x01) {
 		u16 low = get_entry_l3cc(&table, 2 * i);
 
-		I915_WRITE(GEN9_LNCFCMOCS(i),
-			   l3cc_combine(&table, low, unused_value));
+		intel_uncore_write(uncore,
+				   GEN9_LNCFCMOCS(i),
+				   l3cc_combine(&table, low, unused_value));
 		i++;
 	}
 
 	/* All remaining entries are also unused */
 	for (; i < table.n_entries / 2; i++)
-		I915_WRITE(GEN9_LNCFCMOCS(i),
-			   l3cc_combine(&table, unused_value, unused_value));
+		intel_uncore_write(uncore,
+				   GEN9_LNCFCMOCS(i),
+				   l3cc_combine(&table, unused_value,
+						unused_value));
 }
 
 /**
@@ -558,7 +573,7 @@ int intel_rcs_context_init_mocs(struct i915_request *rq)
 	struct drm_i915_mocs_table t;
 	int ret;
 
-	if (get_mocs_settings(rq->i915, &t)) {
+	if (get_mocs_settings(rq->engine->gt, &t)) {
 		/* Program the RCS control registers */
 		ret = emit_mocs_control_table(rq, &t);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 0913704..8b9813e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -52,9 +52,10 @@
 struct drm_i915_private;
 struct i915_request;
 struct intel_engine_cs;
+struct intel_gt;
 
 int intel_rcs_context_init_mocs(struct i915_request *rq);
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
+void intel_mocs_init_l3cc_table(struct intel_gt *gt);
 void intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
similarity index 94%
rename from drivers/gpu/drm/i915/i915_gem_render_state.c
rename to drivers/gpu/drm/i915/gt/intel_renderstate.c
index 4ee0320..06a8dc4 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -26,10 +26,9 @@
  */
 
 #include "i915_drv.h"
-#include "i915_gem_render_state.h"
 #include "intel_renderstate.h"
 
-struct intel_render_state {
+struct intel_renderstate {
 	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
@@ -75,7 +74,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
 		(batch)[(i)++] = (val);				\
 	} while(0)
 
-static int render_state_setup(struct intel_render_state *so,
+static int render_state_setup(struct intel_renderstate *so,
 			      struct drm_i915_private *i915)
 {
 	const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -177,10 +176,10 @@ static int render_state_setup(struct intel_render_state *so,
 
 #undef OUT_BATCH
 
-int i915_gem_render_state_emit(struct i915_request *rq)
+int intel_renderstate_emit(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct intel_render_state so = {}; /* keep the compiler happy */
+	struct intel_renderstate so = {}; /* keep the compiler happy */
 	int err;
 
 	so.rodata = render_state_get_rodata(engine);
@@ -194,7 +193,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
 	if (IS_ERR(so.obj))
 		return PTR_ERR(so.obj);
 
-	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
+	so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(so.vma)) {
 		err = PTR_ERR(so.vma);
 		goto err_obj;
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
similarity index 91%
rename from drivers/gpu/drm/i915/intel_renderstate.h
rename to drivers/gpu/drm/i915/gt/intel_renderstate.h
index 08f6fea..8d50791 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -21,11 +21,13 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef _INTEL_RENDERSTATE_H
-#define _INTEL_RENDERSTATE_H
+#ifndef _INTEL_RENDERSTATE_H_
+#define _INTEL_RENDERSTATE_H_
 
 #include <linux/types.h>
 
+struct i915_request;
+
 struct intel_renderstate_rodata {
 	const u32 *reloc;
 	const u32 *batch;
@@ -44,4 +46,6 @@ extern const struct intel_renderstate_rodata gen7_null_state;
 extern const struct intel_renderstate_rodata gen8_null_state;
 extern const struct intel_renderstate_rodata gen9_null_state;
 
-#endif /* INTEL_RENDERSTATE_H */
+int intel_renderstate_emit(struct i915_request *rq);
+
+#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4c478b3..adfdb90 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -15,6 +15,7 @@
 #include "i915_gpu_error.h"
 #include "i915_irq.h"
 #include "intel_engine_pm.h"
+#include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_reset.h"
 
@@ -25,16 +26,6 @@
 /* XXX How to handle concurrent GGTT updates using tiling registers? */
 #define RESET_UNDER_STOP_MACHINE 0
 
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
-	intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
-	intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
 {
 	intel_uncore_rmw_fw(uncore, reg, 0, set);
@@ -687,7 +678,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	 * written to the powercontext is undefined and so we may lose
 	 * GPU state upon resume, i.e. fail to restart after a reset.
 	 */
-	intel_engine_pm_get(engine);
 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 	engine->reset.prepare(engine);
 }
@@ -718,16 +708,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
 	}
 }
 
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake = 0;
 	enum intel_engine_id id;
 
-	intel_gt_pm_get(i915);
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		if (intel_engine_pm_get_if_awake(engine))
+			awake |= engine->mask;
 		reset_prepare_engine(engine);
+	}
 
 	intel_uc_reset_prepare(i915);
+
+	return awake;
 }
 
 static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +756,22 @@ static int gt_reset(struct drm_i915_private *i915,
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
 	engine->reset.finish(engine);
-	intel_engine_pm_put(engine);
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+	intel_engine_signal_breadcrumbs(engine);
 }
 
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+			 intel_engine_mask_t awake)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
 		reset_finish_engine(engine);
-		intel_engine_signal_breadcrumbs(engine);
+		if (awake & engine->mask)
+			intel_engine_pm_put(engine);
 	}
-	intel_gt_pm_put(i915);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -798,6 +795,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake;
 	enum intel_engine_id id;
 
 	if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +815,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +839,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id)
 		engine->cancel_requests(engine);
 
-	reset_finish(i915);
+	reset_finish(i915, awake);
 
 	GEM_TRACE("end\n");
 }
@@ -860,7 +858,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
-	struct i915_timeline *tl;
+	struct intel_timeline *tl;
 
 	if (!test_bit(I915_WEDGED, &error->flags))
 		return true;
@@ -900,7 +898,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 	}
 	mutex_unlock(&i915->gt.timelines.mutex);
 
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
@@ -951,6 +949,21 @@ static int do_reset(struct drm_i915_private *i915,
 	return gt_reset(i915, stalled_mask);
 }
 
+static int resume(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int ret;
+
+	for_each_engine(engine, i915, id) {
+		ret = engine->resume(engine);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
@@ -973,6 +986,7 @@ void i915_reset(struct drm_i915_private *i915,
 		const char *reason)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
+	intel_engine_mask_t awake;
 	int ret;
 
 	GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1003,7 @@ void i915_reset(struct drm_i915_private *i915,
 		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
 	error->reset_count++;
 
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	if (!intel_has_gpu_reset(i915)) {
 		if (i915_modparams.reset)
@@ -1024,13 +1038,17 @@ void i915_reset(struct drm_i915_private *i915,
 	if (ret) {
 		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
 			  ret);
-		goto error;
+		goto taint;
 	}
 
+	ret = resume(i915);
+	if (ret)
+		goto taint;
+
 	i915_queue_hangcheck(i915);
 
 finish:
-	reset_finish(i915);
+	reset_finish(i915, awake);
 unlock:
 	mutex_unlock(&error->wedge_mutex);
 	return;
@@ -1081,7 +1099,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
-	if (!intel_wakeref_active(&engine->wakeref))
+	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
 
 	reset_prepare_engine(engine);
@@ -1116,12 +1134,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	 * process to program RING_MODE, HWSP and re-enable submission.
 	 */
 	ret = engine->resume(engine);
-	if (ret)
-		goto out;
 
 out:
 	intel_engine_cancel_stop_cs(engine);
 	reset_finish_engine(engine);
+	intel_engine_pm_put(engine);
 	return ret;
 }
 
@@ -1157,119 +1174,6 @@ static void i915_reset_device(struct drm_i915_private *i915,
 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
-	intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
-static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
-{
-	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
-	GEN6_RING_FAULT_REG_POSTING_READ(engine);
-}
-
-static void clear_error_registers(struct drm_i915_private *i915,
-				  intel_engine_mask_t engine_mask)
-{
-	struct intel_uncore *uncore = &i915->uncore;
-	u32 eir;
-
-	if (!IS_GEN(i915, 2))
-		clear_register(uncore, PGTBL_ER);
-
-	if (INTEL_GEN(i915) < 4)
-		clear_register(uncore, IPEIR(RENDER_RING_BASE));
-	else
-		clear_register(uncore, IPEIR_I965);
-
-	clear_register(uncore, EIR);
-	eir = intel_uncore_read(uncore, EIR);
-	if (eir) {
-		/*
-		 * some errors might have become stuck,
-		 * mask them.
-		 */
-		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
-		rmw_set(uncore, EMR, eir);
-		intel_uncore_write(uncore, GEN2_IIR,
-				   I915_MASTER_ERROR_INTERRUPT);
-	}
-
-	if (INTEL_GEN(i915) >= 8) {
-		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
-		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
-	} else if (INTEL_GEN(i915) >= 6) {
-		struct intel_engine_cs *engine;
-		enum intel_engine_id id;
-
-		for_each_engine_masked(engine, i915, engine_mask, id)
-			gen8_clear_engine_error_register(engine);
-	}
-}
-
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	u32 fault;
-
-	for_each_engine(engine, dev_priv, id) {
-		fault = GEN6_RING_FAULT_REG_READ(engine);
-		if (fault & RING_FAULT_VALID) {
-			DRM_DEBUG_DRIVER("Unexpected fault\n"
-					 "\tAddr: 0x%08lx\n"
-					 "\tAddress space: %s\n"
-					 "\tSource ID: %d\n"
-					 "\tType: %d\n",
-					 fault & PAGE_MASK,
-					 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
-					 RING_FAULT_SRCID(fault),
-					 RING_FAULT_FAULT_TYPE(fault));
-		}
-	}
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
-	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
-	if (fault & RING_FAULT_VALID) {
-		u32 fault_data0, fault_data1;
-		u64 fault_addr;
-
-		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
-		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
-		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
-			     ((u64)fault_data0 << 12);
-
-		DRM_DEBUG_DRIVER("Unexpected fault\n"
-				 "\tAddr: 0x%08x_%08x\n"
-				 "\tAddress space: %s\n"
-				 "\tEngine ID: %d\n"
-				 "\tSource ID: %d\n"
-				 "\tType: %d\n",
-				 upper_32_bits(fault_addr),
-				 lower_32_bits(fault_addr),
-				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
-				 GEN8_RING_FAULT_ENGINE_ID(fault),
-				 RING_FAULT_SRCID(fault),
-				 RING_FAULT_FAULT_TYPE(fault));
-	}
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *i915)
-{
-	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
-	if (INTEL_GEN(i915) >= 8)
-		gen8_check_faults(i915);
-	else if (INTEL_GEN(i915) >= 6)
-		gen6_check_faults(i915);
-	else
-		return;
-
-	clear_error_registers(i915, ALL_ENGINES);
-}
-
 /**
  * i915_handle_error - handle a gpu error
  * @i915: i915 device private
@@ -1318,7 +1222,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 
 	if (flags & I915_ERROR_CAPTURE) {
 		i915_capture_error_state(i915, engine_mask, msg);
-		clear_error_registers(i915, engine_mask);
+		intel_gt_clear_error_registers(&i915->gt, engine_mask);
 	}
 
 	/*
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 580ebdb..03fba0a 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -25,8 +25,6 @@ void i915_handle_error(struct drm_i915_private *i915,
 		       const char *fmt, ...);
 #define I915_ERROR_CAPTURE BIT(0)
 
-void i915_check_and_clear_faults(struct drm_i915_private *i915);
-
 void i915_reset(struct drm_i915_private *i915,
 		intel_engine_mask_t stalled_mask,
 		const char *reason);
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index c6023bc..a98652e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -34,9 +34,10 @@
 #include "gem/i915_gem_context.h"
 
 #include "i915_drv.h"
-#include "i915_gem_render_state.h"
 #include "i915_trace.h"
 #include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_renderstate.h"
 #include "intel_reset.h"
 #include "intel_workarounds.h"
 
@@ -75,7 +76,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 	*cs++ = cmd;
 	while (num_store_dw--) {
 		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*cs++ = i915_scratch_offset(rq->i915);
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+						INTEL_GT_SCRATCH_FIELD_DEFAULT);
 		*cs++ = 0;
 	}
 	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +150,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE) {
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 
@@ -156,7 +160,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 			*cs++ = MI_FLUSH;
 
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 	}
@@ -208,7 +214,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 6);
@@ -241,7 +249,9 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs, flags = 0;
 	int ret;
 
@@ -299,7 +309,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 
 	*cs++ = GFX_OP_PIPE_CONTROL(4);
 	*cs++ = PIPE_CONTROL_QW_WRITE;
-	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+		PIPE_CONTROL_GLOBAL_GTT;
 	*cs++ = 0;
 
 	/* Finally we can flush and with it emit the breadcrumb */
@@ -342,7 +354,9 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
 	u32 *cs, flags = 0;
 
 	/*
@@ -805,7 +819,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_emit(rq);
+	ret = intel_renderstate_emit(rq);
 	if (ret)
 		return ret;
 
@@ -1033,14 +1047,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
 	ENGINE_POSTING_READ(engine, RING_IMR);
 
-	gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
+	gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static void
 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
 	ENGINE_WRITE(engine, RING_IMR, ~0);
-	gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
+	gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask);
 }
 
 static int
@@ -1071,9 +1085,11 @@ i830_emit_bb_start(struct i915_request *rq,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+	u32 *cs, cs_offset =
+		intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_DEFAULT);
 
-	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
 
 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -1149,16 +1165,16 @@ i915_emit_bb_start(struct i915_request *rq,
 int intel_ring_pin(struct intel_ring *ring)
 {
 	struct i915_vma *vma = ring->vma;
-	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
 	unsigned int flags;
 	void *addr;
 	int ret;
 
-	GEM_BUG_ON(ring->vaddr);
+	if (atomic_fetch_inc(&ring->pin_count))
+		return 0;
 
-	ret = i915_timeline_pin(ring->timeline);
+	ret = intel_timeline_pin(ring->timeline);
 	if (ret)
-		return ret;
+		goto err_unpin;
 
 	flags = PIN_GLOBAL;
 
@@ -1172,26 +1188,32 @@ int intel_ring_pin(struct intel_ring *ring)
 
 	ret = i915_vma_pin(vma, 0, 0, flags);
 	if (unlikely(ret))
-		goto unpin_timeline;
+		goto err_timeline;
 
 	if (i915_vma_is_map_and_fenceable(vma))
 		addr = (void __force *)i915_vma_pin_iomap(vma);
 	else
-		addr = i915_gem_object_pin_map(vma->obj, map);
+		addr = i915_gem_object_pin_map(vma->obj,
+					       i915_coherent_map_type(vma->vm->i915));
 	if (IS_ERR(addr)) {
 		ret = PTR_ERR(addr);
-		goto unpin_ring;
+		goto err_ring;
 	}
 
 	vma->obj->pin_global++;
 
+	GEM_BUG_ON(ring->vaddr);
 	ring->vaddr = addr;
+
+	GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
 	return 0;
 
-unpin_ring:
+err_ring:
 	i915_vma_unpin(vma);
-unpin_timeline:
-	i915_timeline_unpin(ring->timeline);
+err_timeline:
+	intel_timeline_unpin(ring->timeline);
+err_unpin:
+	atomic_dec(&ring->pin_count);
 	return ret;
 }
 
@@ -1207,34 +1229,40 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
 
 void intel_ring_unpin(struct intel_ring *ring)
 {
-	GEM_BUG_ON(!ring->vma);
-	GEM_BUG_ON(!ring->vaddr);
+	if (!atomic_dec_and_test(&ring->pin_count))
+		return;
+
+	GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
 
 	/* Discard any unused bytes beyond that submitted to hw. */
 	intel_ring_reset(ring, ring->tail);
 
+	GEM_BUG_ON(!ring->vma);
+	i915_vma_unset_ggtt_write(ring->vma);
 	if (i915_vma_is_map_and_fenceable(ring->vma))
 		i915_vma_unpin_iomap(ring->vma);
 	else
 		i915_gem_object_unpin_map(ring->vma->obj);
+
+	GEM_BUG_ON(!ring->vaddr);
 	ring->vaddr = NULL;
 
 	ring->vma->obj->pin_global--;
 	i915_vma_unpin(ring->vma);
 
-	i915_timeline_unpin(ring->timeline);
+	intel_timeline_unpin(ring->timeline);
 }
 
-static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
 {
-	struct i915_address_space *vm = &dev_priv->ggtt.vm;
+	struct i915_address_space *vm = &ggtt->vm;
+	struct drm_i915_private *i915 = vm->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
-	obj = i915_gem_object_create_stolen(dev_priv, size);
+	obj = i915_gem_object_create_stolen(i915, size);
 	if (!obj)
-		obj = i915_gem_object_create_internal(dev_priv, size);
+		obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
@@ -1258,9 +1286,10 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_timeline *timeline,
+			 struct intel_timeline *timeline,
 			 int size)
 {
+	struct drm_i915_private *i915 = engine->i915;
 	struct intel_ring *ring;
 	struct i915_vma *vma;
 
@@ -1273,7 +1302,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	kref_init(&ring->ref);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->timeline = i915_timeline_get(timeline);
+	ring->timeline = intel_timeline_get(timeline);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1281,12 +1310,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 	 * of the buffer.
 	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
+	if (IS_I830(i915) || IS_I845G(i915))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	intel_ring_update_space(ring);
 
-	vma = intel_ring_create_vma(engine->i915, size);
+	vma = create_ring_vma(engine->gt->ggtt, size);
 	if (IS_ERR(vma)) {
 		kfree(ring);
 		return ERR_CAST(vma);
@@ -1303,13 +1332,12 @@ void intel_ring_free(struct kref *ref)
 	i915_vma_close(ring->vma);
 	i915_vma_put(ring->vma);
 
-	i915_timeline_put(ring->timeline);
+	intel_timeline_put(ring->timeline);
 	kfree(ring);
 }
 
 static void __ring_context_fini(struct intel_context *ce)
 {
-	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
 	i915_gem_object_put(ce->state->obj);
 }
 
@@ -1404,7 +1432,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
 		i915_gem_object_unpin_map(obj);
 	}
 
-	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
@@ -1438,7 +1466,7 @@ static int ring_context_pin(struct intel_context *ce)
 		ce->state = vma;
 	}
 
-	err = intel_context_active_acquire(ce, PIN_HIGH);
+	err = intel_context_active_acquire(ce);
 	if (err)
 		return err;
 
@@ -1503,7 +1531,8 @@ static int flush_pd_dir(struct i915_request *rq)
 	/* Stall until the page table load is complete */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
-	*cs++ = i915_scratch_offset(rq->i915);
+	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+					INTEL_GT_SCRATCH_FIELD_DEFAULT);
 	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
@@ -1619,7 +1648,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 			/* Insert a delay before the next switch! */
 			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 			*cs++ = i915_mmio_reg_offset(last_reg);
-			*cs++ = i915_scratch_offset(rq->i915);
+			*cs++ = intel_gt_scratch_offset(rq->engine->gt,
+							INTEL_GT_SCRATCH_FIELD_DEFAULT);
 			*cs++ = MI_NOOP;
 		}
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2081,10 +2111,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
 	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
 		(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
+	intel_engine_cleanup_common(engine);
+
 	intel_ring_unpin(engine->buffer);
 	intel_ring_put(engine->buffer);
 
-	intel_engine_cleanup_common(engine);
 	kfree(engine);
 }
 
@@ -2258,11 +2289,11 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
 int intel_ring_submission_init(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 	struct intel_ring *ring;
 	int err;
 
-	timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
 	if (IS_ERR(timeline)) {
 		err = PTR_ERR(timeline);
 		goto err;
@@ -2270,7 +2301,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
 	GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
 	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
-	i915_timeline_put(timeline);
+	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
 		goto err;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
similarity index 69%
rename from drivers/gpu/drm/i915/i915_timeline.c
rename to drivers/gpu/drm/i915/gt/intel_timeline.c
index c311ce9..6daa9eb 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,38 +4,36 @@
  * Copyright © 2016-2018 Intel Corporation
  */
 
+#include "gt/intel_gt_types.h"
+
 #include "i915_drv.h"
 
 #include "i915_active.h"
 #include "i915_syncmap.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
 
 #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
 #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
 
-struct i915_timeline_hwsp {
-	struct i915_gt_timelines *gt;
+struct intel_timeline_hwsp {
+	struct intel_gt *gt;
+	struct intel_gt_timelines *gt_timelines;
 	struct list_head free_link;
 	struct i915_vma *vma;
 	u64 free_bitmap;
 };
 
-struct i915_timeline_cacheline {
+struct intel_timeline_cacheline {
 	struct i915_active active;
-	struct i915_timeline_hwsp *hwsp;
+	struct intel_timeline_hwsp *hwsp;
 	void *vaddr;
 #define CACHELINE_BITS 6
 #define CACHELINE_FREE CACHELINE_BITS
 };
 
-static inline struct drm_i915_private *
-hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
+static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
 {
-	return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
-}
-
-static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
-{
+	struct drm_i915_private *i915 = gt->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
@@ -45,7 +43,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
-	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
 	if (IS_ERR(vma))
 		i915_gem_object_put(obj);
 
@@ -53,11 +51,10 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
 }
 
 static struct i915_vma *
-hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
+hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
 {
-	struct drm_i915_private *i915 = timeline->i915;
-	struct i915_gt_timelines *gt = &i915->gt.timelines;
-	struct i915_timeline_hwsp *hwsp;
+	struct intel_gt_timelines *gt = &timeline->gt->timelines;
+	struct intel_timeline_hwsp *hwsp;
 
 	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
 
@@ -75,16 +72,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 		if (!hwsp)
 			return ERR_PTR(-ENOMEM);
 
-		vma = __hwsp_alloc(i915);
+		vma = __hwsp_alloc(timeline->gt);
 		if (IS_ERR(vma)) {
 			kfree(hwsp);
 			return vma;
 		}
 
 		vma->private = hwsp;
+		hwsp->gt = timeline->gt;
 		hwsp->vma = vma;
 		hwsp->free_bitmap = ~0ull;
-		hwsp->gt = gt;
+		hwsp->gt_timelines = gt;
 
 		spin_lock_irq(&gt->hwsp_lock);
 		list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -102,9 +100,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
 	return hwsp->vma;
 }
 
-static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
+static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
 {
-	struct i915_gt_timelines *gt = hwsp->gt;
+	struct intel_gt_timelines *gt = hwsp->gt_timelines;
 	unsigned long flags;
 
 	spin_lock_irqsave(&gt->hwsp_lock, flags);
@@ -126,7 +124,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
 	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
 }
 
-static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
 
@@ -140,7 +138,7 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
 
 static void __cacheline_retire(struct i915_active *active)
 {
-	struct i915_timeline_cacheline *cl =
+	struct intel_timeline_cacheline *cl =
 		container_of(active, typeof(*cl), active);
 
 	i915_vma_unpin(cl->hwsp->vma);
@@ -148,10 +146,19 @@ static void __cacheline_retire(struct i915_active *active)
 		__idle_cacheline_free(cl);
 }
 
-static struct i915_timeline_cacheline *
-cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+static int __cacheline_active(struct i915_active *active)
 {
-	struct i915_timeline_cacheline *cl;
+	struct intel_timeline_cacheline *cl =
+		container_of(active, typeof(*cl), active);
+
+	__i915_vma_pin(cl->hwsp->vma);
+	return 0;
+}
+
+static struct intel_timeline_cacheline *
+cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+	struct intel_timeline_cacheline *cl;
 	void *vaddr;
 
 	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
@@ -170,24 +177,25 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
 	cl->hwsp = hwsp;
 	cl->vaddr = page_pack_bits(vaddr, cacheline);
 
-	i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+	i915_active_init(hwsp->gt->i915, &cl->active,
+			 __cacheline_active, __cacheline_retire);
 
 	return cl;
 }
 
-static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl)
 {
-	if (cl && i915_active_acquire(&cl->active))
-		__i915_vma_pin(cl->hwsp->vma);
+	if (cl)
+		i915_active_acquire(&cl->active);
 }
 
-static void cacheline_release(struct i915_timeline_cacheline *cl)
+static void cacheline_release(struct intel_timeline_cacheline *cl)
 {
 	if (cl)
 		i915_active_release(&cl->active);
 }
 
-static void cacheline_free(struct i915_timeline_cacheline *cl)
+static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
 	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
 	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
@@ -196,29 +204,22 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
 		__idle_cacheline_free(cl);
 }
 
-int i915_timeline_init(struct drm_i915_private *i915,
-		       struct i915_timeline *timeline,
-		       struct i915_vma *hwsp)
+int intel_timeline_init(struct intel_timeline *timeline,
+			struct intel_gt *gt,
+			struct i915_vma *hwsp)
 {
 	void *vaddr;
 
-	/*
-	 * Ideally we want a set of engines on a single leaf as we expect
-	 * to mostly be tracking synchronisation between engines. It is not
-	 * a huge issue if this is not the case, but we may want to mitigate
-	 * any page crossing penalties if they become an issue.
-	 *
-	 * Called during early_init before we know how many engines there are.
-	 */
-	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+	kref_init(&timeline->kref);
 
-	timeline->i915 = i915;
+	timeline->gt = gt;
 	timeline->pin_count = 0;
+
 	timeline->has_initial_breadcrumb = !hwsp;
 	timeline->hwsp_cacheline = NULL;
 
 	if (!hwsp) {
-		struct i915_timeline_cacheline *cl;
+		struct intel_timeline_cacheline *cl;
 		unsigned int cacheline;
 
 		hwsp = hwsp_alloc(timeline, &cacheline);
@@ -261,55 +262,47 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	return 0;
 }
 
-void i915_timelines_init(struct drm_i915_private *i915)
+static void timelines_init(struct intel_gt *gt)
 {
-	struct i915_gt_timelines *gt = &i915->gt.timelines;
+	struct intel_gt_timelines *timelines = &gt->timelines;
 
-	mutex_init(&gt->mutex);
-	INIT_LIST_HEAD(&gt->active_list);
+	mutex_init(&timelines->mutex);
+	INIT_LIST_HEAD(&timelines->active_list);
 
-	spin_lock_init(&gt->hwsp_lock);
-	INIT_LIST_HEAD(&gt->hwsp_free_list);
-
-	/* via i915_gem_wait_for_idle() */
-	i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+	spin_lock_init(&timelines->hwsp_lock);
+	INIT_LIST_HEAD(&timelines->hwsp_free_list);
 }
 
-static void timeline_add_to_active(struct i915_timeline *tl)
+void intel_timelines_init(struct drm_i915_private *i915)
 {
-	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+	timelines_init(&i915->gt);
+}
+
+static void timeline_add_to_active(struct intel_timeline *tl)
+{
+	struct intel_gt_timelines *gt = &tl->gt->timelines;
 
 	mutex_lock(&gt->mutex);
 	list_add(&tl->link, &gt->active_list);
 	mutex_unlock(&gt->mutex);
 }
 
-static void timeline_remove_from_active(struct i915_timeline *tl)
+static void timeline_remove_from_active(struct intel_timeline *tl)
 {
-	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+	struct intel_gt_timelines *gt = &tl->gt->timelines;
 
 	mutex_lock(&gt->mutex);
 	list_del(&tl->link);
 	mutex_unlock(&gt->mutex);
 }
 
-/**
- * i915_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_timelines_park(struct drm_i915_private *i915)
+static void timelines_park(struct intel_gt *gt)
 {
-	struct i915_gt_timelines *gt = &i915->gt.timelines;
-	struct i915_timeline *timeline;
+	struct intel_gt_timelines *timelines = &gt->timelines;
+	struct intel_timeline *timeline;
 
-	mutex_lock(&gt->mutex);
-	list_for_each_entry(timeline, &gt->active_list, link) {
+	mutex_lock(&timelines->mutex);
+	list_for_each_entry(timeline, &timelines->active_list, link) {
 		/*
 		 * All known fences are completed so we can scrap
 		 * the current sync point tracking and start afresh,
@@ -318,10 +311,25 @@ void i915_timelines_park(struct drm_i915_private *i915)
 		 */
 		i915_syncmap_free(&timeline->sync);
 	}
-	mutex_unlock(&gt->mutex);
+	mutex_unlock(&timelines->mutex);
 }
 
-void i915_timeline_fini(struct i915_timeline *timeline)
+/**
+ * intel_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled and therefore we will not even look them up in the
+ * sync point map.
+ */
+void intel_timelines_park(struct drm_i915_private *i915)
+{
+	timelines_park(&i915->gt);
+}
+
+void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
@@ -336,29 +344,26 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 	i915_vma_put(timeline->hwsp_ggtt);
 }
 
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
-		     struct i915_vma *global_hwsp)
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
 {
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 	int err;
 
 	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
 	if (!timeline)
 		return ERR_PTR(-ENOMEM);
 
-	err = i915_timeline_init(i915, timeline, global_hwsp);
+	err = intel_timeline_init(timeline, gt, global_hwsp);
 	if (err) {
 		kfree(timeline);
 		return ERR_PTR(err);
 	}
 
-	kref_init(&timeline->kref);
-
 	return timeline;
 }
 
-int i915_timeline_pin(struct i915_timeline *tl)
+int intel_timeline_pin(struct intel_timeline *tl)
 {
 	int err;
 
@@ -384,7 +389,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
 	return err;
 }
 
-static u32 timeline_advance(struct i915_timeline *tl)
+static u32 timeline_advance(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
 	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
@@ -392,17 +397,17 @@ static u32 timeline_advance(struct i915_timeline *tl)
 	return tl->seqno += 1 + tl->has_initial_breadcrumb;
 }
 
-static void timeline_rollback(struct i915_timeline *tl)
+static void timeline_rollback(struct intel_timeline *tl)
 {
 	tl->seqno -= 1 + tl->has_initial_breadcrumb;
 }
 
 static noinline int
-__i915_timeline_get_seqno(struct i915_timeline *tl,
-			  struct i915_request *rq,
-			  u32 *seqno)
+__intel_timeline_get_seqno(struct intel_timeline *tl,
+			   struct i915_request *rq,
+			   u32 *seqno)
 {
-	struct i915_timeline_cacheline *cl;
+	struct intel_timeline_cacheline *cl;
 	unsigned int cacheline;
 	struct i915_vma *vma;
 	void *vaddr;
@@ -488,31 +493,31 @@ __i915_timeline_get_seqno(struct i915_timeline *tl,
 	return err;
 }
 
-int i915_timeline_get_seqno(struct i915_timeline *tl,
-			    struct i915_request *rq,
-			    u32 *seqno)
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+			     struct i915_request *rq,
+			     u32 *seqno)
 {
 	*seqno = timeline_advance(tl);
 
 	/* Replace the HWSP on wraparound for HW semaphores */
 	if (unlikely(!*seqno && tl->hwsp_cacheline))
-		return __i915_timeline_get_seqno(tl, rq, seqno);
+		return __intel_timeline_get_seqno(tl, rq, seqno);
 
 	return 0;
 }
 
-static int cacheline_ref(struct i915_timeline_cacheline *cl,
+static int cacheline_ref(struct intel_timeline_cacheline *cl,
 			 struct i915_request *rq)
 {
 	return i915_active_ref(&cl->active, rq->fence.context, rq);
 }
 
-int i915_timeline_read_hwsp(struct i915_request *from,
-			    struct i915_request *to,
-			    u32 *hwsp)
+int intel_timeline_read_hwsp(struct i915_request *from,
+			     struct i915_request *to,
+			     u32 *hwsp)
 {
-	struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
-	struct i915_timeline *tl = from->timeline;
+	struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+	struct intel_timeline *tl = from->timeline;
 	int err;
 
 	GEM_BUG_ON(to->timeline == tl);
@@ -535,7 +540,7 @@ int i915_timeline_read_hwsp(struct i915_request *from,
 	return err;
 }
 
-void i915_timeline_unpin(struct i915_timeline *tl)
+void intel_timeline_unpin(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
 	if (--tl->pin_count)
@@ -554,26 +559,31 @@ void i915_timeline_unpin(struct i915_timeline *tl)
 	__i915_vma_unpin(tl->hwsp_ggtt);
 }
 
-void __i915_timeline_free(struct kref *kref)
+void __intel_timeline_free(struct kref *kref)
 {
-	struct i915_timeline *timeline =
+	struct intel_timeline *timeline =
 		container_of(kref, typeof(*timeline), kref);
 
-	i915_timeline_fini(timeline);
+	intel_timeline_fini(timeline);
 	kfree(timeline);
 }
 
-void i915_timelines_fini(struct drm_i915_private *i915)
+static void timelines_fini(struct intel_gt *gt)
 {
-	struct i915_gt_timelines *gt = &i915->gt.timelines;
+	struct intel_gt_timelines *timelines = &gt->timelines;
 
-	GEM_BUG_ON(!list_empty(&gt->active_list));
-	GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+	GEM_BUG_ON(!list_empty(&timelines->active_list));
+	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
 
-	mutex_destroy(&gt->mutex);
+	mutex_destroy(&timelines->mutex);
+}
+
+void intel_timelines_fini(struct drm_i915_private *i915)
+{
+	timelines_fini(&i915->gt);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_timeline.c"
+#include "gt/selftests/mock_timeline.c"
+#include "gt/selftest_timeline.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
new file mode 100644
index 0000000..e08cebf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
+
+#include <linux/lockdep.h>
+
+#include "i915_active.h"
+#include "i915_syncmap.h"
+#include "gt/intel_timeline_types.h"
+
+int intel_timeline_init(struct intel_timeline *tl,
+			struct intel_gt *gt,
+			struct i915_vma *hwsp);
+void intel_timeline_fini(struct intel_timeline *tl);
+
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+
+static inline struct intel_timeline *
+intel_timeline_get(struct intel_timeline *timeline)
+{
+	kref_get(&timeline->kref);
+	return timeline;
+}
+
+void __intel_timeline_free(struct kref *kref);
+static inline void intel_timeline_put(struct intel_timeline *timeline)
+{
+	kref_put(&timeline->kref, __intel_timeline_free);
+}
+
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+					    u64 context, u32 seqno)
+{
+	return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+					  const struct dma_fence *fence)
+{
+	return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+						  u64 context, u32 seqno)
+{
+	return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+						const struct dma_fence *fence)
+{
+	return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
+
+int intel_timeline_pin(struct intel_timeline *tl);
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+			     struct i915_request *rq,
+			     u32 *seqno);
+void intel_timeline_unpin(struct intel_timeline *tl);
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+			     struct i915_request *until,
+			     u32 *hwsp_offset);
+
+void intel_timelines_init(struct drm_i915_private *i915);
+void intel_timelines_park(struct drm_i915_private *i915);
+void intel_timelines_fini(struct drm_i915_private *i915);
+
+#endif
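
A minimal usage sketch of the renamed API declared above, assuming an already-initialised struct intel_gt; the example_* helper is hypothetical and error handling is abbreviated.

#include <linux/dma-fence.h>

#include "gt/intel_timeline.h"

/* Sketch: create a timeline, record a fence in its syncmap, then drop the ref. */
static int example_timeline_use(struct intel_gt *gt, struct dma_fence *fence)
{
	struct intel_timeline *tl;
	int err;

	tl = intel_timeline_create(gt, NULL); /* NULL: use a per-timeline HWSP */
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	err = intel_timeline_sync_set(tl, fence);
	if (!err && !intel_timeline_sync_is_later(tl, fence))
		err = -EINVAL; /* the seqno we just recorded must now be "later" */

	intel_timeline_put(tl);
	return err;
}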
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
similarity index 91%
rename from drivers/gpu/drm/i915/i915_timeline_types.h
rename to drivers/gpu/drm/i915/gt/intel_timeline_types.h
index fce5cb4..9a71aea 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -16,10 +16,10 @@
 
 struct drm_i915_private;
 struct i915_vma;
-struct i915_timeline_cacheline;
+struct intel_timeline_cacheline;
 struct i915_syncmap;
 
-struct i915_timeline {
+struct intel_timeline {
 	u64 fence_context;
 	u32 seqno;
 
@@ -30,7 +30,7 @@ struct i915_timeline {
 	struct i915_vma *hwsp_ggtt;
 	u32 hwsp_offset;
 
-	struct i915_timeline_cacheline *hwsp_cacheline;
+	struct intel_timeline_cacheline *hwsp_cacheline;
 
 	bool has_initial_breadcrumb;
 
@@ -59,7 +59,7 @@ struct i915_timeline {
 	struct i915_syncmap *sync;
 
 	struct list_head link;
-	struct drm_i915_private *i915;
+	struct intel_gt *gt;
 
 	struct kref kref;
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 15e90fd..f6fd690 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "intel_context.h"
+#include "intel_gt.h"
 #include "intel_workarounds.h"
 
 /**
@@ -990,9 +991,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
 	spin_unlock_irqrestore(&uncore->lock, flags);
 }
 
-void intel_gt_apply_workarounds(struct drm_i915_private *i915)
+void intel_gt_apply_workarounds(struct intel_gt *gt)
 {
-	wa_list_apply(&i915->uncore, &i915->gt_wa_list);
+	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
 }
 
 static bool wa_list_verify(struct intel_uncore *uncore,
@@ -1011,10 +1012,9 @@ static bool wa_list_verify(struct intel_uncore *uncore,
 	return ok;
 }
 
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
-				 const char *from)
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
 {
-	return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
+	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
 }
 
 static void
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
 
 static void cfl_whitelist_build(struct intel_engine_cs *engine)
 {
+	struct i915_wa_list *w = &engine->whitelist;
+
 	if (engine->class != RENDER_CLASS)
 		return;
 
-	gen9_whitelist_build(&engine->whitelist);
+	gen9_whitelist_build(w);
+
+	/*
+	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+	 *
+	 * This covers 4 registers which are next to one another:
+	 *   - PS_INVOCATION_COUNT
+	 *   - PS_INVOCATION_COUNT_UDW
+	 *   - PS_DEPTH_COUNT
+	 *   - PS_DEPTH_COUNT_UDW
+	 */
+	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+			  RING_FORCE_TO_NONPRIV_RD |
+			  RING_FORCE_TO_NONPRIV_RANGE_4);
 }
 
 static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
 
 		/* WaEnableStateCacheRedirectToCS:icl */
 		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+		/*
+		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+		 *
+		 * This covers 4 registers which are next to one another:
+		 *   - PS_INVOCATION_COUNT
+		 *   - PS_INVOCATION_COUNT_UDW
+		 *   - PS_DEPTH_COUNT
+		 *   - PS_DEPTH_COUNT_UDW
+		 */
+		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+				  RING_FORCE_TO_NONPRIV_RD |
+				  RING_FORCE_TO_NONPRIV_RANGE_4);
 		break;
 
 	case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
 			wa_write_or(wal,
 				    GEN7_SARCHKMD,
-				    GEN7_DISABLE_DEMAND_PREFETCH |
-				    GEN7_DISABLE_SAMPLER_PREFETCH);
+				    GEN7_DISABLE_DEMAND_PREFETCH);
+
+		/* Wa_1606682166:icl */
+		wa_write_or(wal,
+			    GEN7_SARCHKMD,
+			    GEN7_DISABLE_SAMPLER_PREFETCH);
 	}
 
 	if (IS_GEN_RANGE(i915, 9, 11)) {
@@ -1328,7 +1360,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
 	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
 		return;
 
-	if (engine->id == RCS0)
+	if (engine->class == RENDER_CLASS)
 		rcs_engine_wa_init(engine, wal);
 	else
 		xcs_engine_wa_init(engine, wal);
@@ -1338,7 +1370,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 {
 	struct i915_wa_list *wal = &engine->wa_list;
 
-	if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+	if (INTEL_GEN(engine->i915) < 8)
 		return;
 
 	wa_init_start(wal, engine->name);
@@ -1426,7 +1458,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
 	if (!wal->count)
 		return 0;
 
-	vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 3761a6e..8c9c769 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -14,6 +14,7 @@
 struct drm_i915_private;
 struct i915_request;
 struct intel_engine_cs;
+struct intel_gt;
 
 static inline void intel_wa_list_free(struct i915_wa_list *wal)
 {
@@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
 int intel_engine_emit_ctx_wa(struct i915_request *rq);
 
 void intel_gt_init_workarounds(struct drm_i915_private *i915);
-void intel_gt_apply_workarounds(struct drm_i915_private *i915);
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
-				 const char *from);
+void intel_gt_apply_workarounds(struct intel_gt *gt);
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
 
 void intel_engine_init_whitelist(struct intel_engine_cs *engine);
 void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
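
To show the new calling convention in one place, a hedged call-site sketch using the gt-based prototypes above; the example_* function and the "resume" context string are made up, only the two intel_gt_* calls come from this patch.

#include "gt/intel_workarounds.h"

/* Sketch: re-apply and cross-check the GT workaround list, e.g. after a reset. */
static void example_reapply_gt_workarounds(struct drm_i915_private *i915)
{
	struct intel_gt *gt = &i915->gt;

	intel_gt_apply_workarounds(gt);
	if (!intel_gt_verify_workarounds(gt, "resume"))
		DRM_DEBUG_DRIVER("GT workarounds failed to stick\n");
}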
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 086801b..490ebd1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -33,15 +33,15 @@
 
 struct mock_ring {
 	struct intel_ring base;
-	struct i915_timeline timeline;
+	struct intel_timeline timeline;
 };
 
-static void mock_timeline_pin(struct i915_timeline *tl)
+static void mock_timeline_pin(struct intel_timeline *tl)
 {
 	tl->pin_count++;
 }
 
-static void mock_timeline_unpin(struct i915_timeline *tl)
+static void mock_timeline_unpin(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
 	tl->pin_count--;
@@ -56,7 +56,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	if (!ring)
 		return NULL;
 
-	if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
+	if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
 		kfree(ring);
 		return NULL;
 	}
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	ring->base.effective_size = sz;
 	ring->base.vaddr = (void *)(ring + 1);
 	ring->base.timeline = &ring->timeline;
+	atomic_set(&ring->base.pin_count, 1);
 
 	INIT_LIST_HEAD(&ring->base.request_list);
 	intel_ring_update_space(&ring->base);
@@ -77,7 +78,7 @@ static void mock_ring_free(struct intel_ring *base)
 {
 	struct mock_ring *ring = container_of(base, typeof(*ring), base);
 
-	i915_timeline_fini(&ring->timeline);
+	intel_timeline_fini(&ring->timeline);
 	kfree(ring);
 }
 
@@ -154,7 +155,7 @@ static int mock_context_pin(struct intel_context *ce)
 			return -ENOMEM;
 	}
 
-	ret = intel_context_active_acquire(ce, PIN_HIGH);
+	ret = intel_context_active_acquire(ce);
 	if (ret)
 		return ret;
 
@@ -256,6 +257,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 
 	/* minimal engine setup for requests */
 	engine->base.i915 = i915;
+	engine->base.gt = &i915->gt;
 	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
 	engine->base.id = id;
 	engine->base.mask = BIT(id);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 1ee4c92..2d9cc3c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,6 +25,7 @@
 #include <linux/kthread.h>
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
 #include "intel_engine_pm.h"
 
 #include "i915_selftest.h"
@@ -43,6 +44,7 @@
 
 struct hang {
 	struct drm_i915_private *i915;
+	struct intel_gt *gt;
 	struct drm_i915_gem_object *hws;
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_context *ctx;
@@ -128,35 +130,31 @@ static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = h->i915;
-	struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+	struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+	struct drm_i915_gem_object *obj;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
+	void *vaddr;
 	u32 *batch;
 	int err;
 
-	if (i915_gem_object_is_active(h->obj)) {
-		struct drm_i915_gem_object *obj;
-		void *vaddr;
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 
-		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-		if (IS_ERR(obj))
-			return ERR_CAST(obj);
-
-		vaddr = i915_gem_object_pin_map(obj,
-						i915_coherent_map_type(h->i915));
-		if (IS_ERR(vaddr)) {
-			i915_gem_object_put(obj);
-			return ERR_CAST(vaddr);
-		}
-
-		i915_gem_object_unpin_map(h->obj);
-		i915_gem_object_put(h->obj);
-
-		h->obj = obj;
-		h->batch = vaddr;
+	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
+	if (IS_ERR(vaddr)) {
+		i915_gem_object_put(obj);
+		return ERR_CAST(vaddr);
 	}
 
+	i915_gem_object_unpin_map(h->obj);
+	i915_gem_object_put(h->obj);
+
+	h->obj = obj;
+	h->batch = vaddr;
+
 	vma = i915_vma_instance(h->obj, vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
@@ -242,7 +240,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		*batch++ = lower_32_bits(vma->node.start);
 	}
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-	i915_gem_chipset_flush(h->i915);
+	intel_gt_chipset_flush(engine->gt);
 
 	if (rq->engine->emit_init_breadcrumb) {
 		err = rq->engine->emit_init_breadcrumb(rq);
@@ -251,7 +249,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 	}
 
 	flags = 0;
-	if (INTEL_GEN(vm->i915) <= 5)
+	if (INTEL_GEN(i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -276,7 +274,9 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
 static void hang_fini(struct hang *h)
 {
 	*h->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(h->i915);
+
+	if (h->gt)
+		intel_gt_chipset_flush(h->gt);
 
 	i915_gem_object_unpin_map(h->obj);
 	i915_gem_object_put(h->obj);
@@ -333,7 +333,7 @@ static int igt_hang_sanitycheck(void *arg)
 		i915_request_get(rq);
 
 		*h.batch = MI_BATCH_BUFFER_END;
-		i915_gem_chipset_flush(i915);
+		intel_gt_chipset_flush(engine->gt);
 
 		i915_request_add(rq);
 
@@ -373,7 +373,6 @@ static int igt_reset_nop(void *arg)
 	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
@@ -393,11 +392,11 @@ static int igt_reset_nop(void *arg)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	reset_count = i915_reset_count(&i915->gpu_error);
 	count = 0;
 	do {
 		mutex_lock(&i915->drm.struct_mutex);
+
 		for_each_engine(engine, i915, id) {
 			int i;
 
@@ -413,11 +412,12 @@ static int igt_reset_nop(void *arg)
 				i915_request_add(rq);
 			}
 		}
-		mutex_unlock(&i915->drm.struct_mutex);
 
 		igt_global_reset_lock(i915);
 		i915_reset(i915, ALL_ENGINES, NULL);
 		igt_global_reset_unlock(i915);
+
+		mutex_unlock(&i915->drm.struct_mutex);
 		if (i915_reset_failed(i915)) {
 			err = -EIO;
 			break;
@@ -440,8 +440,6 @@ static int igt_reset_nop(void *arg)
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
@@ -455,7 +453,6 @@ static int igt_reset_nop_engine(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	int err = 0;
 
@@ -477,7 +474,6 @@ static int igt_reset_nop_engine(void *arg)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for_each_engine(engine, i915, id) {
 		unsigned int reset_count, reset_engine_count;
 		unsigned int count;
@@ -511,9 +507,8 @@ static int igt_reset_nop_engine(void *arg)
 
 				i915_request_add(rq);
 			}
-			mutex_unlock(&i915->drm.struct_mutex);
-
 			err = i915_reset_engine(engine, NULL);
+			mutex_unlock(&i915->drm.struct_mutex);
 			if (err) {
 				pr_err("i915_reset_engine failed\n");
 				break;
@@ -548,7 +543,6 @@ static int igt_reset_nop_engine(void *arg)
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
@@ -1509,7 +1503,7 @@ static int igt_reset_queue(void *arg)
 		pr_info("%s: Completed %d resets\n", engine->name, count);
 
 		*h.batch = MI_BATCH_BUFFER_END;
-		i915_gem_chipset_flush(i915);
+		intel_gt_chipset_flush(engine->gt);
 
 		i915_request_put(prev);
 
@@ -1750,7 +1744,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 	drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
 
-	err = i915_subtests(tests, i915);
+	err = i915_live_subtests(tests, i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	igt_flush_test(i915, I915_WAIT_LOCKED);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 401e8b5..b9b881a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -73,12 +73,228 @@ static int live_sanitycheck(void *arg)
 err_spin:
 	igt_spinner_fini(&spin);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 10);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = i915_ggtt_offset(vma) + 4 * idx;
+	*cs++ = 0;
+
+	if (idx > 0) {
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+		*cs++ = 0;
+		*cs++ = 1;
+	} else {
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+	intel_ring_advance(rq, cs);
+	return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+	struct i915_gem_context *ctx;
+	struct i915_request *rq;
+	int err;
+
+	ctx = kernel_context(engine->i915);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	rq = igt_request_alloc(ctx, engine);
+	if (IS_ERR(rq))
+		goto out_ctx;
+
+	err = emit_semaphore_chain(rq, vma, idx);
+	i915_request_add(rq);
+	if (err)
+		rq = ERR_PTR(err);
+
+out_ctx:
+	kernel_context_close(ctx);
+	return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+	      struct i915_vma *vma,
+	      int idx)
+{
+	struct i915_sched_attr attr = {
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+	};
+	struct i915_request *rq;
+	u32 *cs;
+
+	rq = i915_request_create(engine->kernel_context);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+	*cs++ = 0;
+	*cs++ = 1;
+
+	intel_ring_advance(rq, cs);
+	i915_request_add(rq);
+
+	engine->schedule(rq, &attr);
+
+	return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+		      struct i915_vma *vma,
+		      int count)
+{
+	struct intel_engine_cs *engine;
+	struct i915_request *head;
+	enum intel_engine_id id;
+	int err, i, n = 0;
+
+	head = semaphore_queue(outer, vma, n++);
+	if (IS_ERR(head))
+		return PTR_ERR(head);
+
+	i915_request_get(head);
+	for_each_engine(engine, outer->i915, id) {
+		for (i = 0; i < count; i++) {
+			struct i915_request *rq;
+
+			rq = semaphore_queue(engine, vma, n++);
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				goto out;
+			}
+		}
+	}
+
+	err = release_queue(outer, vma, n);
+	if (err)
+		goto out;
+
+	if (i915_request_wait(head,
+			      I915_WAIT_LOCKED,
+			      2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+		pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
+		       count, n);
+		GEM_TRACE_DUMP();
+		i915_gem_set_wedged(outer->i915);
+		err = -EIO;
+	}
+
+out:
+	i915_request_put(head);
+	return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	intel_wakeref_t wakeref;
+	struct i915_vma *vma;
+	void *vaddr;
+	int err = 0;
+	int count;
+
+	/*
+	 * If a request takes too long, we would like to give other users
+	 * a fair go on the GPU. In particular, users may create batches
+	 * that wait upon external input, where that input may even be
+	 * supplied by another GPU job. To avoid blocking forever, we
+	 * need to preempt the current task and replace it with another
+	 * ready task.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_unlock;
+	}
+
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err)
+		goto err_map;
+
+	for_each_prime_number_from(count, 1, 16) {
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+
+		for_each_engine(engine, i915, id) {
+			memset(vaddr, 0, PAGE_SIZE);
+
+			err = slice_semaphore_queue(engine, vma, count);
+			if (err)
+				goto err_pin;
+
+			if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+				err = -EIO;
+				goto err_pin;
+			}
+		}
+	}
+
+err_pin:
+	i915_vma_unpin(vma);
+err_map:
+	i915_gem_object_unpin_map(obj);
+err_obj:
+	i915_gem_object_put(obj);
+err_unlock:
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
+
 static int live_busywait_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -253,8 +469,6 @@ static int live_busywait_preempt(void *arg)
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -361,7 +575,6 @@ static int live_preempt(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -398,6 +611,9 @@ static int live_late_preempt(void *arg)
 	if (!ctx_lo)
 		goto err_ctx_hi;
 
+	/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+	ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
 	for_each_engine(engine, i915, id) {
 		struct igt_live_test t;
 		struct i915_request *rq;
@@ -465,7 +681,6 @@ static int live_late_preempt(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -506,6 +721,114 @@ static void preempt_client_fini(struct preempt_client *c)
 	kernel_context_close(c->ctx);
 }
 
+static int live_nopreempt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct preempt_client a, b;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	int err = -ENOMEM;
+
+	/*
+	 * Verify that we can disable preemption for an individual request
+	 * that may be being observed and does not want to be interrupted.
+	 */
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	if (preempt_client_init(i915, &a))
+		goto err_unlock;
+	if (preempt_client_init(i915, &b))
+		goto err_client_a;
+	b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq_a, *rq_b;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		engine->execlists.preempt_hang.count = 0;
+
+		rq_a = igt_spinner_create_request(&a.spin,
+						  a.ctx, engine,
+						  MI_ARB_CHECK);
+		if (IS_ERR(rq_a)) {
+			err = PTR_ERR(rq_a);
+			goto err_client_b;
+		}
+
+		/* Low priority client, but unpreemptable! */
+		rq_a->flags |= I915_REQUEST_NOPREEMPT;
+
+		i915_request_add(rq_a);
+		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+			pr_err("First client failed to start\n");
+			goto err_wedged;
+		}
+
+		rq_b = igt_spinner_create_request(&b.spin,
+						  b.ctx, engine,
+						  MI_ARB_CHECK);
+		if (IS_ERR(rq_b)) {
+			err = PTR_ERR(rq_b);
+			goto err_client_b;
+		}
+
+		i915_request_add(rq_b);
+
+		/* B is much more important than A! (But A is unpreemptable.) */
+		GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+		/* Wait long enough for preemption and timeslicing */
+		if (igt_wait_for_spinner(&b.spin, rq_b)) {
+			pr_err("Second client started too early!\n");
+			goto err_wedged;
+		}
+
+		igt_spinner_end(&a.spin);
+
+		if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+			pr_err("Second client failed to start\n");
+			goto err_wedged;
+		}
+
+		igt_spinner_end(&b.spin);
+
+		if (engine->execlists.preempt_hang.count) {
+			pr_err("Preemption recorded x%d; should have been suppressed!\n",
+			       engine->execlists.preempt_hang.count);
+			err = -EINVAL;
+			goto err_wedged;
+		}
+
+		if (igt_flush_test(i915, I915_WAIT_LOCKED))
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&b);
+err_client_a:
+	preempt_client_fini(&a);
+err_unlock:
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+
+err_wedged:
+	igt_spinner_end(&b.spin);
+	igt_spinner_end(&a.spin);
+	i915_gem_set_wedged(i915);
+	err = -EIO;
+	goto err_client_b;
+}
+
 static int live_suppress_self_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -604,8 +927,6 @@ static int live_suppress_self_preempt(void *arg)
 err_client_a:
 	preempt_client_fini(&a);
 err_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -646,6 +967,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
 	i915_sw_fence_init(&rq->submit, dummy_notify);
 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
+	spin_lock_init(&rq->lock);
+	rq->fence.lock = &rq->lock;
+	INIT_LIST_HEAD(&rq->fence.cb_list);
+
 	return rq;
 }
 
@@ -773,8 +1098,6 @@ static int live_suppress_wait_preempt(void *arg)
 err_client_0:
 	preempt_client_fini(&client[0]);
 err_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -921,8 +1244,6 @@ static int live_chain_preempt(void *arg)
 err_client_hi:
 	preempt_client_fini(&hi);
 err_unlock:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -1048,7 +1369,6 @@ static int live_preempt_hang(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -1812,9 +2132,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_sanitycheck),
+		SUBTEST(live_timeslice_preempt),
 		SUBTEST(live_busywait_preempt),
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
+		SUBTEST(live_nopreempt),
 		SUBTEST(live_suppress_self_preempt),
 		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
@@ -1831,5 +2153,5 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 	if (i915_terminally_wedged(i915))
 		return 0;
 
-	return i915_subtests(tests, i915);
+	return i915_live_subtests(tests, i915);
 }
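
The timeslice selftest above queues a chain of semaphore waits across the engines; as a rough CPU-side analogy (illustrative only, not the GPU command stream), each link spins on its own slot and then releases the previous one, so poking the final slot from release_queue() unwinds the chain back to the head request.

/* CPU analogy of emit_semaphore_chain()/release_queue(); n slots, head is slot 0. */
static void example_release_chain(u32 *sema, int n)
{
	int idx;

	WRITE_ONCE(sema[n - 1], 1);	/* release_queue(): poke the last link */

	for (idx = n - 1; idx > 0; idx--) {
		while (!READ_ONCE(sema[idx]))	/* MI_SEMAPHORE_WAIT, poll != 0 */
			cpu_relax();
		WRITE_ONCE(sema[idx - 1], 1);	/* MI_STORE_DWORD_IMM to slot idx - 1 */
	}
	/* once sema[0] is written, the head request's wait completes */
}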
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7..672e32e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -63,21 +63,25 @@ static int igt_atomic_reset(void *arg)
 
 	/* Check that the resets are usable from atomic context */
 
+	intel_gt_pm_get(&i915->gt);
 	igt_global_reset_lock(i915);
-	mutex_lock(&i915->drm.struct_mutex);
 
 	/* Flush any requests before we get started and check basics */
 	if (!igt_force_reset(i915))
 		goto unlock;
 
 	for (p = igt_atomic_phases; p->name; p++) {
+		intel_engine_mask_t awake;
+
 		GEM_TRACE("intel_gpu_reset under %s\n", p->name);
 
+		awake = reset_prepare(i915);
 		p->critical_section_begin();
-		reset_prepare(i915);
+
 		err = intel_gpu_reset(i915, ALL_ENGINES);
-		reset_finish(i915);
+
 		p->critical_section_end();
+		reset_finish(i915, awake);
 
 		if (err) {
 			pr_err("intel_gpu_reset failed under %s\n", p->name);
@@ -89,8 +93,66 @@ static int igt_atomic_reset(void *arg)
 	igt_force_reset(i915);
 
 unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
+	intel_gt_pm_put(&i915->gt);
+
+	return err;
+}
+
+static int igt_atomic_engine_reset(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	const typeof(*igt_atomic_phases) *p;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/* Check that the resets are usable from atomic context */
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	if (USES_GUC_SUBMISSION(i915))
+		return 0;
+
+	intel_gt_pm_get(&i915->gt);
+	igt_global_reset_lock(i915);
+
+	/* Flush any requests before we get started and check basics */
+	if (!igt_force_reset(i915))
+		goto out_unlock;
+
+	for_each_engine(engine, i915, id) {
+		tasklet_disable_nosync(&engine->execlists.tasklet);
+		intel_engine_pm_get(engine);
+
+		for (p = igt_atomic_phases; p->name; p++) {
+			GEM_TRACE("i915_reset_engine(%s) under %s\n",
+				  engine->name, p->name);
+
+			p->critical_section_begin();
+			err = i915_reset_engine(engine, NULL);
+			p->critical_section_end();
+
+			if (err) {
+				pr_err("i915_reset_engine(%s) failed under %s\n",
+				       engine->name, p->name);
+				break;
+			}
+		}
+
+		intel_engine_pm_put(engine);
+		tasklet_enable(&engine->execlists.tasklet);
+		if (err)
+			break;
+	}
+
+	/* As we poke around the guts, do a full reset before continuing. */
+	igt_force_reset(i915);
+
+out_unlock:
+	igt_global_reset_unlock(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	return err;
 }
@@ -101,6 +163,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
 		SUBTEST(igt_wedged_reset),
 		SUBTEST(igt_atomic_reset),
+		SUBTEST(igt_atomic_engine_reset),
 	};
 	intel_wakeref_t wakeref;
 	int err = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
similarity index 86%
rename from drivers/gpu/drm/i915/selftests/i915_timeline.c
rename to drivers/gpu/drm/i915/gt/selftest_timeline.c
index 76d3977..eae3b19 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -8,14 +8,14 @@
 
 #include "gem/i915_gem_pm.h"
 
-#include "i915_random.h"
-#include "i915_selftest.h"
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
 
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
+#include "../selftests/igt_flush_test.h"
+#include "../selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
 
-static struct page *hwsp_page(struct i915_timeline *tl)
+static struct page *hwsp_page(struct intel_timeline *tl)
 {
 	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
 
@@ -23,7 +23,7 @@ static struct page *hwsp_page(struct i915_timeline *tl)
 	return sg_page(obj->mm.pages->sgl);
 }
 
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
 {
 	unsigned long address = (unsigned long)page_address(hwsp_page(tl));
 
@@ -35,7 +35,7 @@ static unsigned long hwsp_cacheline(struct i915_timeline *tl)
 struct mock_hwsp_freelist {
 	struct drm_i915_private *i915;
 	struct radix_tree_root cachelines;
-	struct i915_timeline **history;
+	struct intel_timeline **history;
 	unsigned long count, max;
 	struct rnd_state prng;
 };
@@ -46,12 +46,12 @@ enum {
 
 static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
 			       unsigned int idx,
-			       struct i915_timeline *tl)
+			       struct intel_timeline *tl)
 {
 	tl = xchg(&state->history[idx], tl);
 	if (tl) {
 		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
-		i915_timeline_put(tl);
+		intel_timeline_put(tl);
 	}
 }
 
@@ -59,14 +59,14 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
 				unsigned int count,
 				unsigned int flags)
 {
-	struct i915_timeline *tl;
+	struct intel_timeline *tl;
 	unsigned int idx;
 
 	while (count--) {
 		unsigned long cacheline;
 		int err;
 
-		tl = i915_timeline_create(state->i915, NULL);
+		tl = intel_timeline_create(&state->i915->gt, NULL);
 		if (IS_ERR(tl))
 			return PTR_ERR(tl);
 
@@ -77,7 +77,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
 				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
 				       cacheline);
 			}
-			i915_timeline_put(tl);
+			intel_timeline_put(tl);
 			return err;
 		}
 
@@ -162,21 +162,21 @@ struct __igt_sync {
 	bool set;
 };
 
-static int __igt_sync(struct i915_timeline *tl,
+static int __igt_sync(struct intel_timeline *tl,
 		      u64 ctx,
 		      const struct __igt_sync *p,
 		      const char *name)
 {
 	int ret;
 
-	if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
 		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
 		       name, p->name, ctx, p->seqno, yesno(p->expected));
 		return -EINVAL;
 	}
 
 	if (p->set) {
-		ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
+		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
 		if (ret)
 			return ret;
 	}
@@ -204,7 +204,7 @@ static int igt_sync(void *arg)
 		{ "unwrap", UINT_MAX, true, false },
 		{},
 	}, *p;
-	struct i915_timeline tl;
+	struct intel_timeline tl;
 	int order, offset;
 	int ret = -ENODEV;
 
@@ -248,7 +248,7 @@ static unsigned int random_engine(struct rnd_state *rnd)
 static int bench_sync(void *arg)
 {
 	struct rnd_state prng;
-	struct i915_timeline tl;
+	struct intel_timeline tl;
 	unsigned long end_time, count;
 	u64 prng32_1M;
 	ktime_t kt;
@@ -286,7 +286,7 @@ static int bench_sync(void *arg)
 	do {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		__i915_timeline_sync_set(&tl, id, 0);
+		__intel_timeline_sync_set(&tl, id, 0);
 		count++;
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
@@ -301,7 +301,7 @@ static int bench_sync(void *arg)
 	while (end_time--) {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
 			mock_timeline_fini(&tl);
 			pr_err("Lookup of %llu failed\n", id);
 			return -EINVAL;
@@ -322,7 +322,7 @@ static int bench_sync(void *arg)
 	kt = ktime_get();
 	end_time = jiffies + HZ/10;
 	do {
-		__i915_timeline_sync_set(&tl, count++, 0);
+		__intel_timeline_sync_set(&tl, count++, 0);
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
 	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -332,7 +332,7 @@ static int bench_sync(void *arg)
 	end_time = count;
 	kt = ktime_get();
 	while (end_time--) {
-		if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
+		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
 			pr_err("Lookup of %lu failed\n", end_time);
 			mock_timeline_fini(&tl);
 			return -EINVAL;
@@ -356,8 +356,8 @@ static int bench_sync(void *arg)
 		u32 id = random_engine(&prng);
 		u32 seqno = prandom_u32_state(&prng);
 
-		if (!__i915_timeline_sync_is_later(&tl, id, seqno))
-			__i915_timeline_sync_set(&tl, id, seqno);
+		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+			__intel_timeline_sync_set(&tl, id, seqno);
 
 		count++;
 	} while (!time_after(jiffies, end_time));
@@ -385,8 +385,8 @@ static int bench_sync(void *arg)
 			 */
 			u64 id = (u64)(count & mask) << order;
 
-			__i915_timeline_sync_is_later(&tl, id, 0);
-			__i915_timeline_sync_set(&tl, id, 0);
+			__intel_timeline_sync_is_later(&tl, id, 0);
+			__intel_timeline_sync_set(&tl, id, 0);
 
 			count++;
 		} while (!time_after(jiffies, end_time));
@@ -401,7 +401,7 @@ static int bench_sync(void *arg)
 	return 0;
 }
 
-int i915_timeline_mock_selftests(void)
+int intel_timeline_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(mock_hwsp_freelist),
@@ -443,14 +443,14 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
 }
 
 static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
 {
 	struct i915_request *rq;
 	int err;
 
-	lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
+	lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
 
-	err = i915_timeline_pin(tl);
+	err = intel_timeline_pin(tl);
 	if (err) {
 		rq = ERR_PTR(err);
 		goto out;
@@ -466,26 +466,26 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
 		rq = ERR_PTR(err);
 
 out_unpin:
-	i915_timeline_unpin(tl);
+	intel_timeline_unpin(tl);
 out:
 	if (IS_ERR(rq))
 		pr_err("Failed to write to timeline!\n");
 	return rq;
 }
 
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
+static struct intel_timeline *
+checked_intel_timeline_create(struct drm_i915_private *i915)
 {
-	struct i915_timeline *tl;
+	struct intel_timeline *tl;
 
-	tl = i915_timeline_create(i915, NULL);
+	tl = intel_timeline_create(&i915->gt, NULL);
 	if (IS_ERR(tl))
 		return tl;
 
 	if (*tl->hwsp_seqno != tl->seqno) {
 		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
 		       *tl->hwsp_seqno, tl->seqno);
-		i915_timeline_put(tl);
+		intel_timeline_put(tl);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -496,7 +496,7 @@ static int live_hwsp_engine(void *arg)
 {
 #define NUM_TIMELINES 4096
 	struct drm_i915_private *i915 = arg;
-	struct i915_timeline **timelines;
+	struct intel_timeline **timelines;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
@@ -523,10 +523,10 @@ static int live_hwsp_engine(void *arg)
 			continue;
 
 		for (n = 0; n < NUM_TIMELINES; n++) {
-			struct i915_timeline *tl;
+			struct intel_timeline *tl;
 			struct i915_request *rq;
 
-			tl = checked_i915_timeline_create(i915);
+			tl = checked_intel_timeline_create(i915);
 			if (IS_ERR(tl)) {
 				err = PTR_ERR(tl);
 				goto out;
@@ -534,7 +534,7 @@ static int live_hwsp_engine(void *arg)
 
 			rq = tl_write(tl, engine, count);
 			if (IS_ERR(rq)) {
-				i915_timeline_put(tl);
+				intel_timeline_put(tl);
 				err = PTR_ERR(rq);
 				goto out;
 			}
@@ -548,14 +548,14 @@ static int live_hwsp_engine(void *arg)
 		err = -EIO;
 
 	for (n = 0; n < count; n++) {
-		struct i915_timeline *tl = timelines[n];
+		struct intel_timeline *tl = timelines[n];
 
 		if (!err && *tl->hwsp_seqno != n) {
 			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
 			       n, *tl->hwsp_seqno);
 			err = -EINVAL;
 		}
-		i915_timeline_put(tl);
+		intel_timeline_put(tl);
 	}
 
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -571,7 +571,7 @@ static int live_hwsp_alternate(void *arg)
 {
 #define NUM_TIMELINES 4096
 	struct drm_i915_private *i915 = arg;
-	struct i915_timeline **timelines;
+	struct intel_timeline **timelines;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
@@ -596,13 +596,13 @@ static int live_hwsp_alternate(void *arg)
 	count = 0;
 	for (n = 0; n < NUM_TIMELINES; n++) {
 		for_each_engine(engine, i915, id) {
-			struct i915_timeline *tl;
+			struct intel_timeline *tl;
 			struct i915_request *rq;
 
 			if (!intel_engine_can_store_dword(engine))
 				continue;
 
-			tl = checked_i915_timeline_create(i915);
+			tl = checked_intel_timeline_create(i915);
 			if (IS_ERR(tl)) {
 				err = PTR_ERR(tl);
 				goto out;
@@ -610,7 +610,7 @@ static int live_hwsp_alternate(void *arg)
 
 			rq = tl_write(tl, engine, count);
 			if (IS_ERR(rq)) {
-				i915_timeline_put(tl);
+				intel_timeline_put(tl);
 				err = PTR_ERR(rq);
 				goto out;
 			}
@@ -624,14 +624,14 @@ static int live_hwsp_alternate(void *arg)
 		err = -EIO;
 
 	for (n = 0; n < count; n++) {
-		struct i915_timeline *tl = timelines[n];
+		struct intel_timeline *tl = timelines[n];
 
 		if (!err && *tl->hwsp_seqno != n) {
 			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
 			       n, *tl->hwsp_seqno);
 			err = -EINVAL;
 		}
-		i915_timeline_put(tl);
+		intel_timeline_put(tl);
 	}
 
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -647,7 +647,7 @@ static int live_hwsp_wrap(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
-	struct i915_timeline *tl;
+	struct intel_timeline *tl;
 	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
 	int err = 0;
@@ -660,7 +660,7 @@ static int live_hwsp_wrap(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	tl = i915_timeline_create(i915, NULL);
+	tl = intel_timeline_create(&i915->gt, NULL);
 	if (IS_ERR(tl)) {
 		err = PTR_ERR(tl);
 		goto out_rpm;
@@ -668,7 +668,7 @@ static int live_hwsp_wrap(void *arg)
 	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
 		goto out_free;
 
-	err = i915_timeline_pin(tl);
+	err = intel_timeline_pin(tl);
 	if (err)
 		goto out_free;
 
@@ -688,7 +688,7 @@ static int live_hwsp_wrap(void *arg)
 
 		tl->seqno = -4u;
 
-		err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
@@ -703,7 +703,7 @@ static int live_hwsp_wrap(void *arg)
 		}
 		hwsp_seqno[0] = tl->hwsp_seqno;
 
-		err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
 		if (err) {
 			i915_request_add(rq);
 			goto out;
@@ -745,9 +745,9 @@ static int live_hwsp_wrap(void *arg)
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
-	i915_timeline_unpin(tl);
+	intel_timeline_unpin(tl);
 out_free:
-	i915_timeline_put(tl);
+	intel_timeline_put(tl);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -781,10 +781,10 @@ static int live_hwsp_recycle(void *arg)
 			continue;
 
 		do {
-			struct i915_timeline *tl;
+			struct intel_timeline *tl;
 			struct i915_request *rq;
 
-			tl = checked_i915_timeline_create(i915);
+			tl = checked_intel_timeline_create(i915);
 			if (IS_ERR(tl)) {
 				err = PTR_ERR(tl);
 				goto out;
@@ -792,14 +792,14 @@ static int live_hwsp_recycle(void *arg)
 
 			rq = tl_write(tl, engine, count);
 			if (IS_ERR(rq)) {
-				i915_timeline_put(tl);
+				intel_timeline_put(tl);
 				err = PTR_ERR(rq);
 				goto out;
 			}
 
 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 				pr_err("Wait for timeline writes timed out!\n");
-				i915_timeline_put(tl);
+				intel_timeline_put(tl);
 				err = -EIO;
 				goto out;
 			}
@@ -810,26 +810,24 @@ static int live_hwsp_recycle(void *arg)
 				err = -EINVAL;
 			}
 
-			i915_timeline_put(tl);
+			intel_timeline_put(tl);
 			count++;
 
 			if (err)
 				goto out;
 
-			i915_timelines_park(i915); /* Encourage recycling! */
+			intel_timelines_park(i915); /* Encourage recycling! */
 		} while (!__igt_timeout(end_time, NULL));
 	}
 
 out:
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
 }
 
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_hwsp_recycle),
@@ -841,5 +839,5 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
 	if (i915_terminally_wedged(i915))
 		return 0;
 
-	return i915_subtests(tests, i915);
+	return i915_live_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9eaf030..fa01ea7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -5,6 +5,7 @@
  */
 
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
 #include "i915_selftest.h"
 #include "intel_reset.h"
 
@@ -102,7 +103,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	i915_gem_object_flush_map(result);
 	i915_gem_object_unpin_map(result);
 
-	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
@@ -286,7 +287,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					const char *name)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_gem_context *ctx;
+	struct i915_gem_context *ctx, *tmp;
 	struct igt_spinner spin;
 	intel_wakeref_t wakeref;
 	int err;
@@ -294,56 +295,59 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
 		engine->whitelist.count, name);
 
-	err = igt_spinner_init(&spin, i915);
-	if (err)
-		return err;
-
 	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	err = igt_spinner_init(&spin, i915);
+	if (err)
+		goto out_ctx;
+
 	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *before* %s reset!\n", name);
-		goto out;
+		goto out_spin;
 	}
 
 	err = switch_to_scratch_context(engine, &spin);
 	if (err)
-		goto out;
+		goto out_spin;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		err = reset(engine);
 
 	igt_spinner_end(&spin);
-	igt_spinner_fini(&spin);
 
 	if (err) {
 		pr_err("%s reset failed\n", name);
-		goto out;
+		goto out_spin;
 	}
 
 	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Whitelist not preserved in context across %s reset!\n",
 		       name);
-		goto out;
+		goto out_spin;
 	}
 
+	tmp = kernel_context(i915);
+	if (IS_ERR(tmp)) {
+		err = PTR_ERR(tmp);
+		goto out_spin;
+	}
 	kernel_context_close(ctx);
-
-	ctx = kernel_context(i915);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ctx = tmp;
 
 	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
 		       name);
-		goto out;
+		goto out_spin;
 	}
 
-out:
+out_spin:
+	igt_spinner_fini(&spin);
+out_ctx:
 	kernel_context_close(ctx);
 	return err;
 }
@@ -542,7 +546,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 
 		i915_gem_object_flush_map(batch->obj);
 		i915_gem_object_unpin_map(batch->obj);
-		i915_gem_chipset_flush(ctx->i915);
+		intel_gt_chipset_flush(engine->gt);
 
 		rq = igt_request_alloc(ctx, engine);
 		if (IS_ERR(rq)) {
@@ -806,7 +810,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
 	*cs++ = MI_BATCH_BUFFER_END;
 
 	i915_gem_object_flush_map(batch->obj);
-	i915_gem_chipset_flush(ctx->i915);
+	intel_gt_chipset_flush(engine->gt);
 
 	rq = igt_request_alloc(ctx, engine);
 	if (IS_ERR(rq)) {
@@ -925,7 +929,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
 
 	err = 0;
 	for (i = 0; i < engine->whitelist.count; i++) {
-		if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+		const struct i915_wa *wa = &engine->whitelist.list[i];
+
+		if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+			continue;
+
+		if (!fn(engine, a[i], b[i], wa->reg))
 			err = -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
similarity index 68%
rename from drivers/gpu/drm/i915/selftests/mock_timeline.c
rename to drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 65b52be..5c54920 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -4,13 +4,13 @@
  * Copyright © 2017-2018 Intel Corporation
  */
 
-#include "../i915_timeline.h"
+#include "../intel_timeline.h"
 
 #include "mock_timeline.h"
 
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
+void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 {
-	timeline->i915 = NULL;
+	timeline->gt = NULL;
 	timeline->fence_context = context;
 
 	mutex_init(&timeline->mutex);
@@ -23,7 +23,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	INIT_LIST_HEAD(&timeline->link);
 }
 
-void mock_timeline_fini(struct i915_timeline *timeline)
+void mock_timeline_fini(struct intel_timeline *timeline)
 {
 	i915_syncmap_free(&timeline->sync);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
new file mode 100644
index 0000000..689efc6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#ifndef __MOCK_TIMELINE__
+#define __MOCK_TIMELINE__
+
+struct intel_timeline;
+
+void mock_timeline_init(struct intel_timeline *timeline, u64 context);
+void mock_timeline_fini(struct intel_timeline *timeline);
+
+#endif /* !__MOCK_TIMELINE__ */
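
A small sketch of how the mock helpers above are meant to be used: a stack-allocated timeline with no backing GPU state, exercising only the syncmap. This mirrors the pattern in selftest_timeline.c; the example_* wrapper is hypothetical.

#include "gt/intel_timeline.h"
#include "gt/selftests/mock_timeline.h"

/* Sketch: syncmap round-trip on a mock timeline, no hardware required. */
static int example_mock_sync(void)
{
	struct intel_timeline tl;
	int err;

	mock_timeline_init(&tl, 0);

	err = __intel_timeline_sync_set(&tl, 1, 10);
	if (!err && !__intel_timeline_sync_is_later(&tl, 1, 5))
		err = -EINVAL; /* seqno 5 should be covered by the stored 10 */

	mock_timeline_fini(&tl);
	return err;
}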
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 293e5bc..13f304a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <linux/debugobjects.h>
+
 #include "gt/intel_engine_pm.h"
 
 #include "i915_drv.h"
@@ -31,49 +33,108 @@ struct active_node {
 	u64 timeline;
 };
 
-static void
-__active_park(struct i915_active *ref)
-{
-	struct active_node *it, *n;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
 
-	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		GEM_BUG_ON(i915_active_request_isset(&it->base));
-		kmem_cache_free(global.slab_cache, it);
-	}
-	ref->tree = RB_ROOT;
+static void *active_debug_hint(void *addr)
+{
+	struct i915_active *ref = addr;
+
+	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
 }
 
+static struct debug_obj_descr active_debug_desc = {
+	.name = "i915_active",
+	.debug_hint = active_debug_hint,
+};
+
+static void debug_active_init(struct i915_active *ref)
+{
+	debug_object_init(ref, &active_debug_desc);
+}
+
+static void debug_active_activate(struct i915_active *ref)
+{
+	debug_object_activate(ref, &active_debug_desc);
+}
+
+static void debug_active_deactivate(struct i915_active *ref)
+{
+	debug_object_deactivate(ref, &active_debug_desc);
+}
+
+static void debug_active_fini(struct i915_active *ref)
+{
+	debug_object_free(ref, &active_debug_desc);
+}
+
+static void debug_active_assert(struct i915_active *ref)
+{
+	debug_object_assert_init(ref, &active_debug_desc);
+}
+
+#else
+
+static inline void debug_active_init(struct i915_active *ref) { }
+static inline void debug_active_activate(struct i915_active *ref) { }
+static inline void debug_active_deactivate(struct i915_active *ref) { }
+static inline void debug_active_fini(struct i915_active *ref) { }
+static inline void debug_active_assert(struct i915_active *ref) { }
+
+#endif
+
 static void
 __active_retire(struct i915_active *ref)
 {
-	GEM_BUG_ON(!ref->count);
-	if (--ref->count)
+	struct active_node *it, *n;
+	struct rb_root root;
+	bool retire = false;
+
+	lockdep_assert_held(&ref->mutex);
+
+	/* return the unused nodes to our slabcache -- flushing the allocator */
+	if (atomic_dec_and_test(&ref->count)) {
+		debug_active_deactivate(ref);
+		root = ref->tree;
+		ref->tree = RB_ROOT;
+		ref->cache = NULL;
+		retire = true;
+	}
+
+	mutex_unlock(&ref->mutex);
+	if (!retire)
 		return;
 
-	/* return the unused nodes to our slabcache */
-	__active_park(ref);
-
 	ref->retire(ref);
+
+	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+		GEM_BUG_ON(i915_active_request_isset(&it->base));
+		kmem_cache_free(global.slab_cache, it);
+	}
+}
+
+static void
+active_retire(struct i915_active *ref)
+{
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	if (atomic_add_unless(&ref->count, -1, 1))
+		return;
+
+	/* One active may be flushed from inside the acquire of another */
+	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	__active_retire(ref);
 }
 
 static void
 node_retire(struct i915_active_request *base, struct i915_request *rq)
 {
-	__active_retire(container_of(base, struct active_node, base)->ref);
-}
-
-static void
-last_retire(struct i915_active_request *base, struct i915_request *rq)
-{
-	__active_retire(container_of(base, struct i915_active, last));
+	active_retire(container_of(base, struct active_node, base)->ref);
 }
 
 static struct i915_active_request *
 active_instance(struct i915_active *ref, u64 idx)
 {
-	struct active_node *node;
+	struct active_node *node, *prealloc;
 	struct rb_node **p, *parent;
-	struct i915_request *old;
 
 	/*
 	 * We track the most recently used timeline to skip a rbtree search
@@ -81,20 +142,18 @@ active_instance(struct i915_active *ref, u64 idx)
 	 * at all. We can reuse the last slot if it is empty, that is
 	 * after the previous activity has been retired, or if it matches the
 	 * current timeline.
-	 *
-	 * Note that we allow the timeline to be active simultaneously in
-	 * the rbtree and the last cache. We do this to avoid having
-	 * to search and replace the rbtree element for a new timeline, with
-	 * the cost being that we must be aware that the ref may be retired
-	 * twice for the same timeline (as the older rbtree element will be
-	 * retired before the new request added to last).
 	 */
-	old = i915_active_request_raw(&ref->last, BKL(ref));
-	if (!old || old->fence.context == idx)
-		goto out;
+	node = READ_ONCE(ref->cache);
+	if (node && node->timeline == idx)
+		return &node->base;
 
-	/* Move the currently active fence into the rbtree */
-	idx = old->fence.context;
+	/* Preallocate a replacement, just in case */
+	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+	if (!prealloc)
+		return NULL;
+
+	mutex_lock(&ref->mutex);
+	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	parent = NULL;
 	p = &ref->tree.rb_node;
@@ -102,8 +161,10 @@ active_instance(struct i915_active *ref, u64 idx)
 		parent = *p;
 
 		node = rb_entry(parent, struct active_node, node);
-		if (node->timeline == idx)
-			goto replace;
+		if (node->timeline == idx) {
+			kmem_cache_free(global.slab_cache, prealloc);
+			goto out;
+		}
 
 		if (node->timeline < idx)
 			p = &parent->rb_right;
@@ -111,17 +172,7 @@ active_instance(struct i915_active *ref, u64 idx)
 			p = &parent->rb_left;
 	}
 
-	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-
-	/* kmalloc may retire the ref->last (thanks shrinker)! */
-	if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
-		kmem_cache_free(global.slab_cache, node);
-		goto out;
-	}
-
-	if (unlikely(!node))
-		return ERR_PTR(-ENOMEM);
-
+	node = prealloc;
 	i915_active_request_init(&node->base, NULL, node_retire);
 	node->ref = ref;
 	node->timeline = idx;
@@ -129,38 +180,29 @@ active_instance(struct i915_active *ref, u64 idx)
 	rb_link_node(&node->node, parent, p);
 	rb_insert_color(&node->node, &ref->tree);
 
-replace:
-	/*
-	 * Overwrite the previous active slot in the rbtree with last,
-	 * leaving last zeroed. If the previous slot is still active,
-	 * we must be careful as we now only expect to receive one retire
-	 * callback not two, and so much undo the active counting for the
-	 * overwritten slot.
-	 */
-	if (i915_active_request_isset(&node->base)) {
-		/* Retire ourselves from the old rq->active_list */
-		__list_del_entry(&node->base.link);
-		ref->count--;
-		GEM_BUG_ON(!ref->count);
-	}
-	GEM_BUG_ON(list_empty(&ref->last.link));
-	list_replace_init(&ref->last.link, &node->base.link);
-	node->base.request = fetch_and_zero(&ref->last.request);
-
 out:
-	return &ref->last;
+	ref->cache = node;
+	mutex_unlock(&ref->mutex);
+
+	return &node->base;
 }
 
-void i915_active_init(struct drm_i915_private *i915,
-		      struct i915_active *ref,
-		      void (*retire)(struct i915_active *ref))
+void __i915_active_init(struct drm_i915_private *i915,
+			struct i915_active *ref,
+			int (*active)(struct i915_active *ref),
+			void (*retire)(struct i915_active *ref),
+			struct lock_class_key *key)
 {
+	debug_active_init(ref);
+
 	ref->i915 = i915;
+	ref->active = active;
 	ref->retire = retire;
 	ref->tree = RB_ROOT;
-	i915_active_request_init(&ref->last, NULL, last_retire);
+	ref->cache = NULL;
 	init_llist_head(&ref->barriers);
-	ref->count = 0;
+	atomic_set(&ref->count, 0);
+	__mutex_init(&ref->mutex, "i915_active", key);
 }
 
 int i915_active_ref(struct i915_active *ref,
@@ -168,60 +210,92 @@ int i915_active_ref(struct i915_active *ref,
 		    struct i915_request *rq)
 {
 	struct i915_active_request *active;
-	int err = 0;
+	int err;
 
 	/* Prevent reaping in case we malloc/wait while building the tree */
-	i915_active_acquire(ref);
+	err = i915_active_acquire(ref);
+	if (err)
+		return err;
 
 	active = active_instance(ref, timeline);
-	if (IS_ERR(active)) {
-		err = PTR_ERR(active);
+	if (!active) {
+		err = -ENOMEM;
 		goto out;
 	}
 
 	if (!i915_active_request_isset(active))
-		ref->count++;
+		atomic_inc(&ref->count);
 	__i915_active_request_set(active, rq);
 
-	GEM_BUG_ON(!ref->count);
 out:
 	i915_active_release(ref);
 	return err;
 }
 
-bool i915_active_acquire(struct i915_active *ref)
+int i915_active_acquire(struct i915_active *ref)
 {
-	lockdep_assert_held(BKL(ref));
-	return !ref->count++;
+	int err;
+
+	debug_active_assert(ref);
+	if (atomic_add_unless(&ref->count, 1, 0))
+		return 0;
+
+	err = mutex_lock_interruptible(&ref->mutex);
+	if (err)
+		return err;
+
+	if (!atomic_read(&ref->count) && ref->active)
+		err = ref->active(ref);
+	if (!err) {
+		debug_active_activate(ref);
+		atomic_inc(&ref->count);
+	}
+
+	mutex_unlock(&ref->mutex);
+
+	return err;
 }
 
 void i915_active_release(struct i915_active *ref)
 {
-	lockdep_assert_held(BKL(ref));
-	__active_retire(ref);
+	debug_active_assert(ref);
+	active_retire(ref);
 }
 
 int i915_active_wait(struct i915_active *ref)
 {
 	struct active_node *it, *n;
-	int ret = 0;
+	int err;
 
-	if (i915_active_acquire(ref))
-		goto out_release;
+	might_sleep();
+	might_lock(&ref->mutex);
 
-	ret = i915_active_request_retire(&ref->last, BKL(ref));
-	if (ret)
-		goto out_release;
+	if (RB_EMPTY_ROOT(&ref->tree))
+		return 0;
+
+	err = mutex_lock_interruptible(&ref->mutex);
+	if (err)
+		return err;
+
+	if (!atomic_add_unless(&ref->count, 1, 0)) {
+		mutex_unlock(&ref->mutex);
+		return 0;
+	}
 
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		ret = i915_active_request_retire(&it->base, BKL(ref));
-		if (ret)
+		err = i915_active_request_retire(&it->base, BKL(ref));
+		if (err)
 			break;
 	}
 
-out_release:
-	i915_active_release(ref);
-	return ret;
+	__active_retire(ref);
+	if (err)
+		return err;
+
+	if (!i915_active_is_idle(ref))
+		return -EBUSY;
+
+	return 0;
 }
 
 int i915_request_await_active_request(struct i915_request *rq,
@@ -236,23 +310,24 @@ int i915_request_await_active_request(struct i915_request *rq,
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
 	struct active_node *it, *n;
-	int err = 0;
+	int err;
+
+	if (RB_EMPTY_ROOT(&ref->tree))
+		return 0;
 
 	/* await allocates and so we need to avoid hitting the shrinker */
-	if (i915_active_acquire(ref))
-		goto out; /* was idle */
-
-	err = i915_request_await_active_request(rq, &ref->last);
+	err = i915_active_acquire(ref);
 	if (err)
-		goto out;
+		return err;
 
+	mutex_lock(&ref->mutex);
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 		err = i915_request_await_active_request(rq, &it->base);
 		if (err)
-			goto out;
+			break;
 	}
+	mutex_unlock(&ref->mutex);
 
-out:
 	i915_active_release(ref);
 	return err;
 }
@@ -260,9 +335,10 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref)
 {
-	GEM_BUG_ON(i915_active_request_isset(&ref->last));
+	debug_active_fini(ref);
 	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
-	GEM_BUG_ON(ref->count);
+	GEM_BUG_ON(atomic_read(&ref->count));
+	mutex_destroy(&ref->mutex);
 }
 #endif
 
@@ -289,7 +365,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					 (void *)engine, node_retire);
 		node->timeline = kctx->ring->timeline->fence_context;
 		node->ref = ref;
-		ref->count++;
+		atomic_inc(&ref->count);
 
 		intel_engine_pm_get(engine);
 		llist_add((struct llist_node *)&node->base.link,
@@ -316,8 +392,9 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 {
 	struct llist_node *pos, *next;
 
-	i915_active_acquire(ref);
+	GEM_BUG_ON(i915_active_is_idle(ref));
 
+	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
 	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
 		struct intel_engine_cs *engine;
 		struct active_node *node;
@@ -347,7 +424,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 			  &engine->barrier_tasks);
 		intel_engine_pm_put(engine);
 	}
-	i915_active_release(ref);
+	mutex_unlock(&ref->mutex);
 }
 
 void i915_request_add_barriers(struct i915_request *rq)
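The whole of i915_active now serialises on its own ref->mutex and an atomic_t reference count instead of the BKL-style struct_mutex. A minimal sketch of the acquire/release bracket every tracking operation uses, built only from the helpers introduced above (the body callback is a stand-in for whatever the caller does while the tracker is pinned):

static int sketch_with_active(struct i915_active *ref,
                              int (*body)(struct i915_active *ref))
{
        int err;

        /*
         * May run ref->active() on the 0->1 transition and can fail
         * with -EINTR from the interruptible mutex wait, so the
         * return value must be checked.
         */
        err = i915_active_acquire(ref);
        if (err)
                return err;

        err = body(ref);        /* e.g. record a request against a timeline */

        /* Drops our reference; the tracker retires once the count is zero. */
        i915_active_release(ref);
        return err;
}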
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index c14eebf..134166d 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -369,9 +369,16 @@ i915_active_request_retire(struct i915_active_request *active,
  * synchronisation.
  */
 
-void i915_active_init(struct drm_i915_private *i915,
-		      struct i915_active *ref,
-		      void (*retire)(struct i915_active *ref));
+void __i915_active_init(struct drm_i915_private *i915,
+			struct i915_active *ref,
+			int (*active)(struct i915_active *ref),
+			void (*retire)(struct i915_active *ref),
+			struct lock_class_key *key);
+#define i915_active_init(i915, ref, active, retire) do {		\
+	static struct lock_class_key __key;				\
+									\
+	__i915_active_init(i915, ref, active, retire, &__key);		\
+} while (0)
 
 int i915_active_ref(struct i915_active *ref,
 		    u64 timeline,
@@ -384,20 +391,14 @@ int i915_request_await_active(struct i915_request *rq,
 int i915_request_await_active_request(struct i915_request *rq,
 				      struct i915_active_request *active);
 
-bool i915_active_acquire(struct i915_active *ref);
-
-static inline void i915_active_cancel(struct i915_active *ref)
-{
-	GEM_BUG_ON(ref->count != 1);
-	ref->count = 0;
-}
-
+int i915_active_acquire(struct i915_active *ref);
 void i915_active_release(struct i915_active *ref);
+void __i915_active_release_nested(struct i915_active *ref, int subclass);
 
 static inline bool
 i915_active_is_idle(const struct i915_active *ref)
 {
-	return !ref->count;
+	return !atomic_read(&ref->count);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
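__i915_active_init() now takes a lock_class_key so lockdep can tell different i915_active users apart; the i915_active_init() wrapper above supplies one static key per expansion site. A sketch of the resulting idiom from a hypothetical user (my_tracker and both callbacks are made up for illustration):

struct my_tracker {
        struct i915_active active;
};

static int my_tracker_activate(struct i915_active *ref)
{
        return 0;       /* e.g. grab a wakeref for the 0->1 transition */
}

static void my_tracker_retire(struct i915_active *ref)
{
        /* e.g. drop the wakeref once the last request has retired */
}

static void my_tracker_init(struct drm_i915_private *i915,
                            struct my_tracker *t)
{
        /* expands a private static struct lock_class_key for this site */
        i915_active_init(i915, &t->active,
                         my_tracker_activate, my_tracker_retire);
}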
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index c025991..5b0a302 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,7 +7,9 @@
 #ifndef _I915_ACTIVE_TYPES_H_
 #define _I915_ACTIVE_TYPES_H_
 
+#include <linux/atomic.h>
 #include <linux/llist.h>
+#include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
 
@@ -24,13 +26,17 @@ struct i915_active_request {
 	i915_active_retire_fn retire;
 };
 
+struct active_node;
+
 struct i915_active {
 	struct drm_i915_private *i915;
 
+	struct active_node *cache;
 	struct rb_root tree;
-	struct i915_active_request last;
-	unsigned int count;
+	struct mutex mutex;
+	atomic_t count;
 
+	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
 
 	struct llist_head barriers;
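Alongside the new mutex and atomic count, the tracker keeps a cache pointer to the most recently used active_node (see ref->cache = node in the i915_active.c hunk above), presumably so repeated references against the same timeline can skip the rbtree walk. A hypothetical lookup-only helper, purely to illustrate that memoisation:

static struct active_node *
sketch_cached_lookup(struct i915_active *ref, u64 idx)
{
        struct active_node *node = READ_ONCE(ref->cache);

        /* Last hit matches this timeline: reuse it without a tree walk. */
        if (node && node->timeline == idx)
                return node;

        return NULL;    /* caller falls back to the rb_root under ref->mutex */
}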
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62cf34d..3e4f58f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -75,11 +75,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	return 0;
 }
 
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
 static char get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	return obj->pin_global ? 'p' : ' ';
@@ -144,9 +139,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 
-	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+	seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
 		   &obj->base,
-		   get_active_flag(obj),
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   get_global_flag(obj),
@@ -1082,8 +1076,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	u64 acthd[I915_NUM_ENGINES];
-	struct intel_instdone instdone;
 	intel_wakeref_t wakeref;
 	enum intel_engine_id id;
 
@@ -1098,13 +1090,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		return 0;
 	}
 
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		for_each_engine(engine, dev_priv, id)
-			acthd[id] = intel_engine_get_active_head(engine);
-
-		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
-	}
-
 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1116,23 +1101,25 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
 
-	for_each_engine(engine, dev_priv, id) {
-		seq_printf(m, "%s: %d ms ago\n",
-			   engine->name,
-			   jiffies_to_msecs(jiffies -
-					    engine->hangcheck.action_timestamp));
+	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+		for_each_engine(engine, dev_priv, id) {
+			struct intel_instdone instdone;
 
-		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-			   (long long)engine->hangcheck.acthd,
-			   (long long)acthd[id]);
+			seq_printf(m, "%s: %d ms ago\n",
+				   engine->name,
+				   jiffies_to_msecs(jiffies -
+						    engine->hangcheck.action_timestamp));
 
-		if (engine->id == RCS0) {
+			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+				   (long long)engine->hangcheck.acthd,
+				   intel_engine_get_active_head(engine));
+
+			intel_engine_get_instdone(engine, &instdone);
+
 			seq_puts(m, "\tinstdone read =\n");
-
 			i915_instdone_info(dev_priv, m, &instdone);
 
 			seq_puts(m, "\tinstdone accu =\n");
-
 			i915_instdone_info(dev_priv, m,
 					   &engine->hangcheck.instdone);
 		}
@@ -2966,14 +2953,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
-	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
-	struct i915_wa *wa;
-	unsigned int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	seq_printf(m, "Workarounds applied: %u\n", wal->count);
-	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
-		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
+	for_each_engine(engine, i915, id) {
+		const struct i915_wa_list *wal = &engine->ctx_wa_list;
+		const struct i915_wa *wa;
+		unsigned int count;
+
+		count = wal->count;
+		if (!count)
+			continue;
+
+		seq_printf(m, "%s: Workarounds applied: %u\n",
+			   engine->name, count);
+
+		for (wa = wal->list; count--; wa++)
+			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+				   i915_mmio_reg_offset(wa->reg),
+				   wa->val, wa->mask);
+
+		seq_printf(m, "\n");
+	}
 
 	return 0;
 }
@@ -4087,7 +4088,7 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
 	/* Synchronize with everything first in case there's been an HPD
 	 * storm, but we haven't finished handling it in the kernel yet
 	 */
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 	flush_work(&dev_priv->hotplug.dig_port_work);
 	flush_work(&dev_priv->hotplug.hotplug_work);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e339..12182d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,6 +61,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_reset.h"
 #include "gt/intel_workarounds.h"
@@ -219,6 +220,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 		WARN_ON(!IS_ICELAKE(dev_priv));
 		return PCH_ICP;
 	case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+	case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
 		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
 		return PCH_MCC;
@@ -754,7 +756,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 cleanup_modeset:
 	intel_modeset_cleanup(dev);
 cleanup_irq:
-	drm_irq_uninstall(dev);
+	intel_irq_uninstall(dev_priv);
 	intel_gmbus_teardown(dev_priv);
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
@@ -900,7 +902,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 
 	intel_device_info_subplatform_init(dev_priv);
 
-	intel_uncore_init_early(&dev_priv->uncore);
+	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
@@ -922,6 +924,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto err_engines;
 
+	intel_gt_init_early(&dev_priv->gt, dev_priv);
+
 	ret = i915_gem_init_early(dev_priv);
 	if (ret < 0)
 		goto err_workqueues;
@@ -1590,6 +1594,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_ggtt;
 
+	intel_gt_init_hw(dev_priv);
+
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to enable GGTT\n");
@@ -1629,7 +1635,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
-	intel_uncore_sanitize(dev_priv);
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	intel_sanitize_gt_powersave(dev_priv);
 
 	intel_gt_init_workarounds(dev_priv);
 
@@ -1898,6 +1905,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
+	i915_detect_vgpu(dev_priv);
+
 	ret = i915_driver_init_mmio(dev_priv);
 	if (ret < 0)
 		goto out_runtime_pm_put;
@@ -1921,6 +1930,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 out_cleanup_hw:
 	i915_driver_cleanup_hw(dev_priv);
 	i915_ggtt_cleanup_hw(dev_priv);
+
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_sanitize_gt_powersave(dev_priv);
 out_cleanup_mmio:
 	i915_driver_cleanup_mmio(dev_priv);
 out_runtime_pm_put:
@@ -1991,6 +2003,10 @@ static void i915_driver_release(struct drm_device *dev)
 	i915_gem_fini(dev_priv);
 
 	i915_ggtt_cleanup_hw(dev_priv);
+
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_sanitize_gt_powersave(dev_priv);
+
 	i915_driver_cleanup_mmio(dev_priv);
 
 	enable_rpm_wakeref_asserts(rpm);
@@ -2348,7 +2364,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_resume_early(&dev_priv->uncore);
 
-	i915_check_and_clear_faults(dev_priv);
+	intel_gt_check_and_clear_faults(&dev_priv->gt);
 
 	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
@@ -2357,11 +2373,11 @@ static int i915_drm_resume_early(struct drm_device *dev)
 		hsw_disable_pc8(dev_priv);
 	}
 
-	intel_uncore_sanitize(dev_priv);
+	intel_sanitize_gt_powersave(dev_priv);
 
 	intel_power_domains_resume(dev_priv);
 
-	intel_gt_sanitize(dev_priv, true);
+	intel_gt_sanitize(&dev_priv->gt, true);
 
 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
@@ -2944,7 +2960,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
 		intel_uc_resume(dev_priv);
 
-		i915_gem_init_swizzling(dev_priv);
+		intel_gt_init_swizzling(&dev_priv->gt);
 		i915_gem_restore_fences(dev_priv);
 
 		enable_rpm_wakeref_asserts(rpm);
@@ -3046,7 +3062,7 @@ static int intel_runtime_resume(struct device *kdev)
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
-	i915_gem_init_swizzling(dev_priv);
+	intel_gt_init_swizzling(&dev_priv->gt);
 	i915_gem_restore_fences(dev_priv);
 
 	/*
@@ -3216,6 +3232,9 @@ static struct drm_driver driver = {
 	.gem_prime_export = i915_gem_prime_export,
 	.gem_prime_import = i915_gem_prime_import,
 
+	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+	.get_scanout_position = i915_get_crtc_scanoutpos,
+
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
 	.ioctls = i915_ioctls,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc909ec..a9381e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,6 +72,7 @@
 
 #include "gt/intel_lrc.h"
 #include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
 #include "gt/intel_workarounds.h"
 
 #include "intel_device_info.h"
@@ -88,7 +89,7 @@
 #include "i915_gpu_error.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -98,8 +99,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20190619"
-#define DRIVER_TIMESTAMP	1560947544
+#define DRIVER_DATE		"20190708"
+#define DRIVER_TIMESTAMP	1562616546
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -286,14 +287,14 @@ struct drm_i915_display_funcs {
 			  enum pipe pipe);
 	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 			     enum i9xx_plane_id i9xx_plane);
-	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
+	int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
+	int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
 	void (*initial_watermarks)(struct intel_atomic_state *state,
-				   struct intel_crtc_state *cstate);
+				   struct intel_crtc_state *crtc_state);
 	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
-					 struct intel_crtc_state *cstate);
+					 struct intel_crtc_state *crtc_state);
 	void (*optimize_watermarks)(struct intel_atomic_state *state,
-				    struct intel_crtc_state *cstate);
+				    struct intel_crtc_state *crtc_state);
 	int (*compute_global_watermarks)(struct intel_atomic_state *state);
 	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -306,10 +307,10 @@ struct drm_i915_display_funcs {
 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
 	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
-			    struct drm_atomic_state *old_state);
+			    struct intel_atomic_state *old_state);
 	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
-			     struct drm_atomic_state *old_state);
-	void (*update_crtcs)(struct drm_atomic_state *state);
+			     struct intel_atomic_state *old_state);
+	void (*update_crtcs)(struct intel_atomic_state *state);
 	void (*audio_codec_enable)(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *crtc_state,
 				   const struct drm_connector_state *conn_state);
@@ -1402,8 +1403,6 @@ struct drm_i915_private {
 		u32 de_irq_mask[I915_MAX_PIPES];
 	};
 	u32 gt_irq_mask;
-	u32 pm_imr;
-	u32 pm_ier;
 	u32 pm_rps_events;
 	u32 pm_guc_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
@@ -1488,8 +1487,6 @@ struct drm_i915_private {
 	DECLARE_HASHTABLE(mm_structs, 7);
 	struct mutex mm_lock;
 
-	struct intel_ppat ppat;
-
 	/* Kernel Modesetting */
 
 	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1645,7 +1642,7 @@ struct drm_i915_private {
 		/*
 		 * Should be held around atomic WM register writing; also
 		 * protects * intel_crtc->wm.active and
-		 * cstate->wm.need_postvbl_update.
+		 * crtc_state->wm.need_postvbl_update.
 		 */
 		struct mutex wm_mutex;
 
@@ -1674,8 +1671,9 @@ struct drm_i915_private {
 	} dram_info;
 
 	struct intel_bw_info {
-		int num_planes;
-		int deratedbw[3];
+		unsigned int deratedbw[3]; /* for each QGV point */
+		u8 num_qgv_points;
+		u8 num_planes;
 	} max_bw[6];
 
 	struct drm_private_obj bw_obj;
@@ -1824,38 +1822,7 @@ struct drm_i915_private {
 	} perf;
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
-	struct {
-		struct i915_gt_timelines {
-			struct mutex mutex; /* protects list, tainted by GPU */
-			struct list_head active_list;
-
-			/* Pack multiple timelines' seqnos into the same page */
-			spinlock_t hwsp_lock;
-			struct list_head hwsp_free_list;
-		} timelines;
-
-		struct list_head active_rings;
-
-		struct intel_wakeref wakeref;
-
-		struct list_head closed_vma;
-		spinlock_t closed_lock; /* guards the list of closed_vma */
-
-		/**
-		 * Is the GPU currently considered idle, or busy executing
-		 * userspace requests? Whilst idle, we allow runtime power
-		 * management to power down the hardware and display clocks.
-		 * In order to reduce the effect on performance, there
-		 * is a slight delay before we do so.
-		 */
-		intel_wakeref_t awake;
-
-		struct blocking_notifier_head pm_notifications;
-
-		ktime_t last_init_time;
-
-		struct i915_vma *scratch;
-	} gt;
+	struct intel_gt gt;
 
 	struct {
 		struct notifier_block pm_notifier;
@@ -1950,11 +1917,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 	return container_of(huc, struct drm_i915_private, huc);
 }
 
-static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
-{
-	return container_of(uncore, struct drm_i915_private, uncore);
-}
-
 /* Simple iterator over all initialised engines */
 #define for_each_engine(engine__, dev_priv__, id__) \
 	for ((id__) = 0; \
@@ -2358,6 +2320,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define INTEL_PCH_CMP_DEVICE_ID_TYPE		0x0280
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE		0x4B00
+#define INTEL_PCH_MCC2_DEVICE_ID_TYPE		0x3880
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
@@ -2480,18 +2443,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
-	if (!atomic_read(&i915->mm.free_count))
-		return;
-
-	/* A single pass should suffice to release all the freed objects (along
+	/*
+	 * A single pass should suffice to release all the freed objects (along
 	 * most call paths) , but be a little more paranoid in that freeing
 	 * the objects does take a little amount of time, during which the rcu
 	 * callbacks could have added new objects into the freed list, and
 	 * armed the work again.
 	 */
-	do {
+	while (atomic_read(&i915->mm.free_count)) {
+		flush_work(&i915->mm.free_work);
 		rcu_barrier();
-	} while (flush_work(&i915->mm.free_work));
+	}
 }
 
 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
@@ -2509,6 +2471,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
 	 */
 	int pass = 3;
 	do {
+		flush_workqueue(i915->wq);
 		rcu_barrier();
 		i915_gem_drain_freed_objects(i915);
 	} while (--pass);
@@ -2522,7 +2485,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags);
 
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+			   unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
@@ -2572,7 +2537,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
 void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
 void i915_gem_fini(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
@@ -2635,16 +2599,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
 					 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
-
-/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
-{
-	wmb();
-	if (INTEL_GEN(dev_priv) < 6)
-		intel_gtt_chipset_flush();
-}
-
 /* i915_gem_stolen.c */
 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 				struct drm_mm_node *node, u64 size,
@@ -2829,11 +2783,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 		return I915_HWS_CSB_WRITE_INDEX;
 }
 
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
-	return i915_ggtt_offset(i915->gt.scratch);
-}
-
 static inline enum i915_map_type
 i915_coherent_map_type(struct drm_i915_private *i915)
 {
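i915_gem_drain_freed_objects() now keeps flushing mm.free_work and waiting out an RCU grace period until mm.free_count actually reaches zero, rather than looping on flush_work()'s return value. The contract, restated as a sketch with explanatory comments:

static inline void sketch_drain_freed(struct drm_i915_private *i915)
{
        while (atomic_read(&i915->mm.free_count)) {
                /* Release objects already queued by earlier RCU callbacks. */
                flush_work(&i915->mm.free_work);
                /* Those releases may arm new callbacks; wait them out. */
                rcu_barrier();
        }
}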
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 6621595..a327094 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -6,6 +6,11 @@
 #ifndef _I915_FIXED_H_
 #define _I915_FIXED_H_
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
 typedef struct {
 	u32 val;
 } uint_fixed_16_16_t;
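i915_fixed.h previously relied on whoever included it to pull in bug.h, kernel.h, math64.h and types.h; this hunk makes it self-contained. For context, uint_fixed_16_16_t is a 16.16 fixed-point value, and helpers along these lines operate on it (the names below are illustrative, not the header's own, and assume the usual split of integer bits in the high half and fraction bits in the low half):

static inline uint_fixed_16_16_t sketch_u32_to_fixed16(u32 val)
{
        uint_fixed_16_16_t fp;

        WARN_ON(val > U16_MAX);         /* would overflow the integer part */
        fp.val = val << 16;
        return fp;
}

static inline u32 sketch_fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
        return (fp.val + 0xffff) >> 16; /* round any fraction up */
}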
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54..7ade42b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,7 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
 #include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
 #include "gt/intel_reset.h"
@@ -101,7 +101,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+			   unsigned long flags)
 {
 	struct i915_vma *vma;
 	LIST_HEAD(still_in_list);
@@ -116,7 +117,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		list_move_tail(&vma->obj_link, &still_in_list);
 		spin_unlock(&obj->vma.lock);
 
-		ret = i915_vma_unbind(vma);
+		ret = -EBUSY;
+		if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+		    !i915_vma_is_active(vma))
+			ret = i915_vma_unbind(vma);
 
 		spin_lock(&obj->vma.lock);
 	}
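i915_gem_object_unbind() now takes a flags word: without I915_GEM_OBJECT_UNBIND_ACTIVE it leaves still-active VMAs bound and reports -EBUSY, with the flag it attempts the unbind regardless. An illustrative caller (not part of this patch) escalating from the gentle to the insistent form:

static int sketch_force_unbind(struct drm_i915_gem_object *obj)
{
        int err;

        /* Best effort first: skip VMAs the GPU is still using. */
        err = i915_gem_object_unbind(obj, 0);
        if (err != -EBUSY)
                return err;

        /* Insistent: unbind even VMAs still tracked as active. */
        return i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
}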
@@ -142,7 +146,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		return -EFAULT;
 
 	drm_clflush_virt_range(vaddr, args->size);
-	i915_gem_chipset_flush(to_i915(obj->base.dev));
+	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 	return 0;
@@ -233,46 +237,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 			       &args->size, &args->handle);
 }
 
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
-{
-	intel_wakeref_t wakeref;
-
-	/*
-	 * No actual flushing is required for the GTT write domain for reads
-	 * from the GTT domain. Writes to it "immediately" go to main memory
-	 * as far as we know, so there's no chipset flush. It also doesn't
-	 * land in the GPU render cache.
-	 *
-	 * However, we do have to enforce the order so that all writes through
-	 * the GTT land before any writes to the device, such as updates to
-	 * the GATT itself.
-	 *
-	 * We also have to wait a bit for the writes to land from the GTT.
-	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
-	 * timing. This issue has only been observed when switching quickly
-	 * between GTT writes and CPU reads from inside the kernel on recent hw,
-	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
-	 * system agents we cannot reproduce this behaviour, until Cannonlake
-	 * that was!).
-	 */
-
-	wmb();
-
-	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
-		return;
-
-	i915_gem_chipset_flush(dev_priv);
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		struct intel_uncore *uncore = &dev_priv->uncore;
-
-		spin_lock_irq(&uncore->lock);
-		intel_uncore_posting_read_fw(uncore,
-					     RING_HEAD(RENDER_RING_BASE));
-		spin_unlock_irq(&uncore->lock);
-	}
-}
-
 static int
 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
 	    bool needs_clflush)
@@ -947,8 +911,8 @@ static long
 wait_for_timelines(struct drm_i915_private *i915,
 		   unsigned int flags, long timeout)
 {
-	struct i915_gt_timelines *gt = &i915->gt.timelines;
-	struct i915_timeline *tl;
+	struct intel_gt_timelines *gt = &i915->gt.timelines;
+	struct intel_timeline *tl;
 
 	mutex_lock(&gt->mutex);
 	list_for_each_entry(tl, &gt->active_list, link) {
@@ -1196,82 +1160,72 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 * it may impact the display and we are uncertain about the stability
 	 * of the reset, so this could be applied to even earlier gen.
 	 */
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+static void init_unused_ring(struct intel_gt *gt, u32 base)
 {
-	if (INTEL_GEN(dev_priv) < 5 ||
-	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
-		return;
+	struct intel_uncore *uncore = gt->uncore;
 
-	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
-				 DISP_TILE_SURFACE_SWIZZLING);
-
-	if (IS_GEN(dev_priv, 5))
-		return;
-
-	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
-	if (IS_GEN(dev_priv, 6))
-		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else if (IS_GEN(dev_priv, 7))
-		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-	else if (IS_GEN(dev_priv, 8))
-		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
-	else
-		BUG();
+	intel_uncore_write(uncore, RING_CTL(base), 0);
+	intel_uncore_write(uncore, RING_HEAD(base), 0);
+	intel_uncore_write(uncore, RING_TAIL(base), 0);
+	intel_uncore_write(uncore, RING_START(base), 0);
 }
 
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
+static void init_unused_rings(struct intel_gt *gt)
 {
-	I915_WRITE(RING_CTL(base), 0);
-	I915_WRITE(RING_HEAD(base), 0);
-	I915_WRITE(RING_TAIL(base), 0);
-	I915_WRITE(RING_START(base), 0);
-}
+	struct drm_i915_private *i915 = gt->i915;
 
-static void init_unused_rings(struct drm_i915_private *dev_priv)
-{
-	if (IS_I830(dev_priv)) {
-		init_unused_ring(dev_priv, PRB1_BASE);
-		init_unused_ring(dev_priv, SRB0_BASE);
-		init_unused_ring(dev_priv, SRB1_BASE);
-		init_unused_ring(dev_priv, SRB2_BASE);
-		init_unused_ring(dev_priv, SRB3_BASE);
-	} else if (IS_GEN(dev_priv, 2)) {
-		init_unused_ring(dev_priv, SRB0_BASE);
-		init_unused_ring(dev_priv, SRB1_BASE);
-	} else if (IS_GEN(dev_priv, 3)) {
-		init_unused_ring(dev_priv, PRB1_BASE);
-		init_unused_ring(dev_priv, PRB2_BASE);
+	if (IS_I830(i915)) {
+		init_unused_ring(gt, PRB1_BASE);
+		init_unused_ring(gt, SRB0_BASE);
+		init_unused_ring(gt, SRB1_BASE);
+		init_unused_ring(gt, SRB2_BASE);
+		init_unused_ring(gt, SRB3_BASE);
+	} else if (IS_GEN(i915, 2)) {
+		init_unused_ring(gt, SRB0_BASE);
+		init_unused_ring(gt, SRB1_BASE);
+	} else if (IS_GEN(i915, 3)) {
+		init_unused_ring(gt, PRB1_BASE);
+		init_unused_ring(gt, PRB2_BASE);
 	}
 }
 
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+int i915_gem_init_hw(struct drm_i915_private *i915)
 {
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 
-	dev_priv->gt.last_init_time = ktime_get();
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
+	gt->last_init_time = ktime_get();
 
 	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
-	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
-		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 
-	if (IS_HASWELL(dev_priv))
-		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
-			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(i915))
+		intel_uncore_write(uncore,
+				   MI_PREDICATE_RESULT_2,
+				   IS_HSW_GT3(i915) ?
+				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	/* Apply the GT workarounds... */
-	intel_gt_apply_workarounds(dev_priv);
+	intel_gt_apply_workarounds(gt);
 	/* ...and determine whether they are sticking. */
-	intel_gt_verify_workarounds(dev_priv, "init");
+	intel_gt_verify_workarounds(gt, "init");
 
-	i915_gem_init_swizzling(dev_priv);
+	intel_gt_init_swizzling(gt);
 
 	/*
 	 * At least 830 can leave some of the unused rings
@@ -1279,49 +1233,33 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	 * will prevent c3 entry. Makes sure all unused rings
 	 * are totally idle.
 	 */
-	init_unused_rings(dev_priv);
+	init_unused_rings(gt);
 
-	BUG_ON(!dev_priv->kernel_context);
-	ret = i915_terminally_wedged(dev_priv);
-	if (ret)
-		goto out;
-
-	ret = i915_ppgtt_init_hw(dev_priv);
+	ret = i915_ppgtt_init_hw(gt);
 	if (ret) {
 		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
 		goto out;
 	}
 
-	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+	ret = intel_wopcm_init_hw(&i915->wopcm, gt);
 	if (ret) {
 		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
 		goto out;
 	}
 
 	/* We can't enable contexts until all firmware is loaded */
-	ret = intel_uc_init_hw(dev_priv);
+	ret = intel_uc_init_hw(i915);
 	if (ret) {
 		DRM_ERROR("Enabling uc failed (%d)\n", ret);
 		goto out;
 	}
 
-	intel_mocs_init_l3cc_table(dev_priv);
+	intel_mocs_init_l3cc_table(gt);
 
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(dev_priv);
-	if (ret)
-		goto cleanup_uc;
+	intel_engines_set_scheduler_caps(i915);
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(dev_priv);
-	return 0;
-
-cleanup_uc:
-	intel_uc_fini_hw(dev_priv);
 out:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
+	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 	return ret;
 }
 
@@ -1453,39 +1391,12 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 static int
 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
 {
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	obj = i915_gem_object_create_stolen(i915, size);
-	if (!obj)
-		obj = i915_gem_object_create_internal(i915, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		return PTR_ERR(obj);
-	}
-
-	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_unref;
-	}
-
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	i915->gt.scratch = vma;
-	return 0;
-
-err_unref:
-	i915_gem_object_put(obj);
-	return ret;
+	return intel_gt_init_scratch(&i915->gt, size);
 }
 
 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
 {
-	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+	intel_gt_fini_scratch(&i915->gt);
 }
 
 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
@@ -1516,7 +1427,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	i915_timelines_init(dev_priv);
+	intel_timelines_init(dev_priv);
 
 	ret = i915_gem_init_userptr(dev_priv);
 	if (ret)
@@ -1539,7 +1450,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
-	ret = i915_gem_init_ggtt(dev_priv);
+	ret = i915_init_ggtt(dev_priv);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
@@ -1580,6 +1491,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_uc_init;
 
+	/* Only when the HW is re-initialised, can we replay the requests */
+	ret = intel_gt_resume(&dev_priv->gt);
+	if (ret)
+		goto err_init_hw;
+
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1509,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	ret = intel_engines_verify_workarounds(dev_priv);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	ret = __intel_engines_record_defaults(dev_priv);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	if (i915_inject_load_failure()) {
 		ret = -ENODEV;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	if (i915_inject_load_failure()) {
 		ret = -EIO;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1536,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_init_hw:
+err_gt:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1546,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	i915_gem_drain_workqueue(dev_priv);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
 	intel_uc_fini_hw(dev_priv);
 err_uc_init:
 	intel_uc_fini(dev_priv);
@@ -1653,7 +1570,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	if (ret != -EIO) {
 		i915_gem_cleanup_userptr(dev_priv);
-		i915_timelines_fini(dev_priv);
+		intel_timelines_fini(dev_priv);
 	}
 
 	if (ret == -EIO) {
@@ -1717,7 +1634,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 
 	intel_uc_fini_misc(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);
-	i915_timelines_fini(dev_priv);
+	intel_timelines_fini(dev_priv);
 
 	i915_gem_drain_freed_objects(dev_priv);
 
@@ -1746,12 +1663,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
 	int err;
 
-	intel_gt_pm_init(dev_priv);
-
-	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
-	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
-	spin_lock_init(&dev_priv->gt.closed_lock);
-
 	i915_gem_init__mm(dev_priv);
 	i915_gem_init__pm(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 25a3e4d..b17f239 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -94,34 +94,26 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	list = &pool->cache_list[n];
 
 	list_for_each_entry(obj, list, batch_pool_link) {
+		struct reservation_object *resv = obj->base.resv;
+
 		/* The batches are strictly LRU ordered */
-		if (i915_gem_object_is_active(obj)) {
-			struct reservation_object *resv = obj->base.resv;
+		if (!reservation_object_test_signaled_rcu(resv, true))
+			break;
 
-			if (!reservation_object_test_signaled_rcu(resv, true))
-				break;
-
-			i915_retire_requests(pool->engine->i915);
-			GEM_BUG_ON(i915_gem_object_is_active(obj));
-
-			/*
-			 * The object is now idle, clear the array of shared
-			 * fences before we add a new request. Although, we
-			 * remain on the same engine, we may be on a different
-			 * timeline and so may continually grow the array,
-			 * trapping a reference to all the old fences, rather
-			 * than replace the existing fence.
-			 */
-			if (rcu_access_pointer(resv->fence)) {
-				reservation_object_lock(resv, NULL);
-				reservation_object_add_excl_fence(resv, NULL);
-				reservation_object_unlock(resv);
-			}
+		/*
+		 * The object is now idle, clear the array of shared
+		 * fences before we add a new request. Although, we
+		 * remain on the same engine, we may be on a different
+		 * timeline and so may continually grow the array,
+		 * trapping a reference to all the old fences, rather
+		 * than replace the existing fence.
+		 */
+		if (rcu_access_pointer(resv->fence)) {
+			reservation_object_lock(resv, NULL);
+			reservation_object_add_excl_fence(resv, NULL);
+			reservation_object_unlock(resv);
 		}
 
-		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
-								 true));
-
 		if (obj->base.size >= size)
 			goto found;
 	}
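Because the batch pool cache list is strictly LRU ordered, the lookup can stop at the first object whose reservation is still busy instead of checking activity per object: everything after it must be at least as busy. Boiled down (the exclusive-fence reset is elided, and the function's own locals are reused), the scan now reads:

        list_for_each_entry(obj, list, batch_pool_link) {
                /* First busy reservation: the rest of the list is busy too. */
                if (!reservation_object_test_signaled_rcu(obj->base.resv, true))
                        break;

                if (obj->base.size >= size)
                        goto found;     /* idle and big enough, reuse it */
        }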
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0bf53ac..bcac359 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -834,3 +834,35 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 
 	i915_gem_restore_fences(i915);
 }
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+
+	if (INTEL_GEN(i915) < 5 ||
+	    i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+		return;
+
+	intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
+
+	if (IS_GEN(i915, 5))
+		return;
+
+	intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
+
+	if (IS_GEN(i915, 6))
+		intel_uncore_write(uncore,
+				   ARB_MODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+	else if (IS_GEN(i915, 7))
+		intel_uncore_write(uncore,
+				   ARB_MODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else if (IS_GEN(i915, 8))
+		intel_uncore_write(uncore,
+				   GAMTARBMODE,
+				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+	else
+		MISSING_CASE(INTEL_GEN(i915));
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index d2da988..37e4f104 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -32,6 +32,7 @@ struct drm_i915_gem_object;
 struct drm_i915_private;
 struct i915_ggtt;
 struct i915_vma;
+struct intel_gt;
 struct sg_table;
 
 #define I965_FENCE_PAGE 4096UL
@@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 
 void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
 
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
 #endif
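intel_gt_init_swizzling() above replaces the I915_WRITE(reg, I915_READ(reg) | bits) pattern with intel_uncore_rmw(uncore, reg, clear, set). Conceptually the helper is just a read-modify-write against an explicit uncore, along these lines (a sketch, not the real implementation):

static inline void sketch_uncore_rmw(struct intel_uncore *uncore,
                                     i915_reg_t reg, u32 clear, u32 set)
{
        u32 val = intel_uncore_read(uncore, reg);

        intel_uncore_write(uncore, reg, (val & ~clear) | set);
}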
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8ab8201..236c964 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -36,6 +36,7 @@
 #include <drm/i915_drm.h>
 
 #include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
 
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
@@ -109,9 +110,9 @@
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
+static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
 {
-	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
 
 	/*
 	 * Note that as an uncached mmio write, this will flush the
@@ -120,24 +121,19 @@ static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
 	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 }
 
-static void guc_ggtt_invalidate(struct drm_i915_private *i915)
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 {
-	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
 
-	gen6_ggtt_invalidate(i915);
+	gen6_ggtt_invalidate(ggtt);
 	intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 }
 
-static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 {
 	intel_gtt_chipset_flush();
 }
 
-static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
-{
-	i915->ggtt.invalidate(i915);
-}
-
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 unused)
@@ -215,10 +211,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
-				  const enum i915_cache_level level)
+static u64 gen8_pde_encode(const dma_addr_t addr,
+			   const enum i915_cache_level level)
 {
-	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+	u64 pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
 	if (level != I915_CACHE_NONE)
 		pde |= PPAT_CACHED_PDE;
@@ -227,9 +223,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 	return pde;
 }
 
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
 static u64 snb_pte_encode(dma_addr_t addr,
 			  enum i915_cache_level level,
 			  u32 flags)
@@ -482,9 +475,69 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 	spin_unlock(&vm->free_pages.lock);
 }
 
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+	spin_lock(&vm->free_pages.lock);
+	if (pagevec_count(&vm->free_pages.pvec))
+		vm_free_pages_release(vm, true);
+	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+	spin_unlock(&vm->free_pages.lock);
+
+	drm_mm_takedown(&vm->mm);
+
+	mutex_destroy(&vm->mutex);
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
+{
+	struct list_head *phases[] = {
+		&vm->bound_list,
+		&vm->unbound_list,
+		NULL,
+	}, **phase;
+
+	mutex_lock(&vm->i915->drm.struct_mutex);
+	for (phase = phases; *phase; phase++) {
+		struct i915_vma *vma, *vn;
+
+		list_for_each_entry_safe(vma, vn, *phase, vm_link)
+			i915_vma_destroy(vma);
+	}
+	mutex_unlock(&vm->i915->drm.struct_mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+	struct i915_address_space *vm =
+		container_of(work, struct i915_address_space, rcu.work);
+
+	ppgtt_destroy_vma(vm);
+
+	GEM_BUG_ON(!list_empty(&vm->bound_list));
+	GEM_BUG_ON(!list_empty(&vm->unbound_list));
+
+	vm->cleanup(vm);
+	i915_address_space_fini(vm);
+
+	kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+	struct i915_address_space *vm =
+		container_of(kref, struct i915_address_space, ref);
+
+	GEM_BUG_ON(i915_is_ggtt(vm));
+	trace_i915_ppgtt_release(vm);
+
+	vm->closed = true;
+	queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 {
 	kref_init(&vm->ref);
+	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
 
 	/*
 	 * The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -505,19 +558,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 	INIT_LIST_HEAD(&vm->bound_list);
 }
 
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
-	spin_lock(&vm->free_pages.lock);
-	if (pagevec_count(&vm->free_pages.pvec))
-		vm_free_pages_release(vm, true);
-	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
-	spin_unlock(&vm->free_pages.lock);
-
-	drm_mm_takedown(&vm->mm);
-
-	mutex_destroy(&vm->mutex);
-}
-
 static int __setup_page_dma(struct i915_address_space *vm,
 			    struct i915_page_dma *p,
 			    gfp_t gfp)
@@ -554,8 +594,6 @@ static void cleanup_page_dma(struct i915_address_space *vm,
 
 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 
-#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
-#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
 
@@ -657,7 +695,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 	if (unlikely(!pt))
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(setup_px(vm, pt))) {
+	if (unlikely(setup_page_dma(vm, &pt->base))) {
 		kfree(pt);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -669,7 +707,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 
 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
 {
-	cleanup_px(vm, pt);
+	cleanup_page_dma(vm, &pt->base);
 	kfree(pt);
 }
 
@@ -712,7 +750,7 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 	if (unlikely(!pd))
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(setup_px(vm, pd))) {
+	if (unlikely(setup_page_dma(vm, &pd->base))) {
 		kfree(pd);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -720,36 +758,77 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 	return pd;
 }
 
-static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
-{
-	return pd->base.page;
-}
-
 static void free_pd(struct i915_address_space *vm,
 		    struct i915_page_directory *pd)
 {
-	if (likely(pd_has_phys_page(pd)))
-		cleanup_px(vm, pd);
-
+	cleanup_page_dma(vm, &pd->base);
 	kfree(pd);
 }
 
-static void init_pd_with_page(struct i915_address_space *vm,
-			      struct i915_page_directory * const pd,
-			      struct i915_page_table *pt)
-{
-	fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
-	memset_p(pd->entry, pt, 512);
+#define init_pd(vm, pd, to) {					\
+	fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \
+	memset_p((pd)->entry, (to), 512);				\
 }
 
-static void init_pd(struct i915_address_space *vm,
-		    struct i915_page_directory * const pd,
-		    struct i915_page_directory * const to)
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+		const unsigned short pde,
+		const u64 encoded_entry)
 {
-	GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+	u64 * const vaddr = kmap_atomic(pdma->page);
 
-	fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
-	memset_p(pd->entry, to, 512);
+	vaddr[pde] = encoded_entry;
+	kunmap_atomic(vaddr);
+}
+
+static inline void
+__set_pd_entry(struct i915_page_directory * const pd,
+	       const unsigned short pde,
+	       struct i915_page_dma * const to,
+	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+	GEM_BUG_ON(atomic_read(&pd->used) > 512);
+
+	atomic_inc(&pd->used);
+	pd->entry[pde] = to;
+	write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
+}
+
+static inline void
+__clear_pd_entry(struct i915_page_directory * const pd,
+		 const unsigned short pde,
+		 struct i915_page_dma * const to,
+		 u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+	GEM_BUG_ON(atomic_read(&pd->used) == 0);
+
+	write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC));
+	pd->entry[pde] = to;
+	atomic_dec(&pd->used);
+}
+
+#define set_pd_entry(pd, pde, to) \
+	__set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode)
+
+#define clear_pd_entry(pd, pde, to) \
+	__clear_pd_entry((pd), (pde), (to), gen8_pde_encode)
+
+static bool
+release_pd_entry(struct i915_page_directory * const pd,
+		 const unsigned short pde,
+		 atomic_t *counter,
+		 struct i915_page_dma * const scratch)
+{
+	bool free = false;
+
+	spin_lock(&pd->lock);
+	if (atomic_dec_and_test(counter)) {
+		clear_pd_entry(pd, pde, scratch);
+		free = true;
+	}
+	spin_unlock(&pd->lock);
+
+	return free;
 }
 
 /*
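With release_pd_entry() in place, the clear paths below all follow the same teardown shape: take a temporary use on the child before descending, then let release_pd_entry() drop it under the parent's spinlock and report whether the slot could be pointed back at scratch. Restated from the gen8_ppgtt_clear_pd() hunk further down:

        gen8_for_each_pde(pt, pd, start, length, pde) {
                atomic_inc(&pt->used);          /* pin while we clear it */
                gen8_ppgtt_clear_pt(vm, pt, start, length);

                /* Drops our pin; true means the slot now points at scratch. */
                if (release_pd_entry(pd, pde, &pt->used,
                                     px_base(vm->scratch_pt)))
                        free_pt(vm, pt);
        }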
@@ -766,11 +845,11 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
 /* Removes entries from a single page table, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries.
  */
-static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
+static void gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 				struct i915_page_table *pt,
 				u64 start, u64 length)
 {
-	unsigned int num_entries = gen8_pte_count(start, length);
+	const unsigned int num_entries = gen8_pte_count(start, length);
 	gen8_pte_t *vaddr;
 
 	vaddr = kmap_atomic_px(pt);
@@ -778,22 +857,11 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 	kunmap_atomic(vaddr);
 
 	GEM_BUG_ON(num_entries > atomic_read(&pt->used));
-	return !atomic_sub_return(num_entries, &pt->used);
+
+	atomic_sub(num_entries, &pt->used);
 }
 
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
-			       struct i915_page_directory *pd,
-			       struct i915_page_table *pt,
-			       unsigned int pde)
-{
-	gen8_pde_t *vaddr;
-
-	vaddr = kmap_atomic_px(pd);
-	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
-}
-
-static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
+static void gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 				struct i915_page_directory *pd,
 				u64 start, u64 length)
 {
@@ -801,48 +869,20 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 	u32 pde;
 
 	gen8_for_each_pde(pt, pd, start, length, pde) {
-		bool free = false;
-
 		GEM_BUG_ON(pt == vm->scratch_pt);
 
-		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
-			continue;
-
-		spin_lock(&pd->lock);
-		if (!atomic_read(&pt->used)) {
-			gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
-			pd->entry[pde] = vm->scratch_pt;
-
-			GEM_BUG_ON(!atomic_read(&pd->used));
-			atomic_dec(&pd->used);
-			free = true;
-		}
-		spin_unlock(&pd->lock);
-		if (free)
+		atomic_inc(&pt->used);
+		gen8_ppgtt_clear_pt(vm, pt, start, length);
+		if (release_pd_entry(pd, pde, &pt->used,
+				     px_base(vm->scratch_pt)))
 			free_pt(vm, pt);
 	}
-
-	return !atomic_read(&pd->used);
-}
-
-static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
-				struct i915_page_directory *pd,
-				unsigned int pdpe)
-{
-	gen8_ppgtt_pdpe_t *vaddr;
-
-	if (!pd_has_phys_page(pdp))
-		return;
-
-	vaddr = kmap_atomic_px(pdp);
-	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
 }
 
 /* Removes entries from a single page dir pointer, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries
  */
-static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
+static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 				 struct i915_page_directory * const pdp,
 				 u64 start, u64 length)
 {
@@ -850,28 +890,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 	unsigned int pdpe;
 
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		bool free = false;
-
 		GEM_BUG_ON(pd == vm->scratch_pd);
 
-		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
-			continue;
-
-		spin_lock(&pdp->lock);
-		if (!atomic_read(&pd->used)) {
-			gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
-			pdp->entry[pdpe] = vm->scratch_pd;
-
-			GEM_BUG_ON(!atomic_read(&pdp->used));
-			atomic_dec(&pdp->used);
-			free = true;
-		}
-		spin_unlock(&pdp->lock);
-		if (free)
+		atomic_inc(&pd->used);
+		gen8_ppgtt_clear_pd(vm, pd, start, length);
+		if (release_pd_entry(pdp, pdpe, &pd->used,
+				     px_base(vm->scratch_pd)))
 			free_pd(vm, pd);
 	}
-
-	return !atomic_read(&pdp->used);
 }
 
 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
@@ -880,17 +906,6 @@ static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
 	gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
 }
 
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
-				 struct i915_page_directory *pdp,
-				 unsigned int pml4e)
-{
-	gen8_ppgtt_pml4e_t *vaddr;
-
-	vaddr = kmap_atomic_px(pml4);
-	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
-	kunmap_atomic(vaddr);
-}
-
 /* Removes entries from a single pml4.
  * This is the top-level structure in 4-level page tables used on gen8+.
  * Empty entries are always scratch pml4e.
@@ -906,20 +921,12 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-		bool free = false;
 		GEM_BUG_ON(pdp == vm->scratch_pdp);
 
-		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
-			continue;
-
-		spin_lock(&pml4->lock);
-		if (!atomic_read(&pdp->used)) {
-			gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-			pml4->entry[pml4e] = vm->scratch_pdp;
-			free = true;
-		}
-		spin_unlock(&pml4->lock);
-		if (free)
+		atomic_inc(&pdp->used);
+		gen8_ppgtt_clear_pdp(vm, pdp, start, length);
+		if (release_pd_entry(pml4, pml4e, &pdp->used,
+				     px_base(vm->scratch_pdp)))
 			free_pd(vm, pdp);
 	}
 }
@@ -1232,7 +1239,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	}
 
 	gen8_initialize_pt(vm, vm->scratch_pt);
-	init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
+	init_pd(vm, vm->scratch_pd, vm->scratch_pt);
 	if (i915_vm_is_4lvl(vm))
 		init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
 
@@ -1255,6 +1262,11 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 	enum vgt_g2v_type msg;
 	int i;
 
+	if (create)
+		atomic_inc(&ppgtt->pd->used); /* never remove */
+	else
+		atomic_dec(&ppgtt->pd->used);
+
 	if (i915_vm_is_4lvl(vm)) {
 		const u64 daddr = px_dma(ppgtt->pd);
 
@@ -1371,9 +1383,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 
 			spin_lock(&pd->lock);
 			if (pd->entry[pde] == vm->scratch_pt) {
-				gen8_ppgtt_set_pde(vm, pd, pt, pde);
-				pd->entry[pde] = pt;
-				atomic_inc(&pd->used);
+				set_pd_entry(pd, pde, pt);
 			} else {
 				alloc = pt;
 				pt = pd->entry[pde];
@@ -1415,13 +1425,11 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 				goto unwind;
 			}
 
-			init_pd_with_page(vm, pd, vm->scratch_pt);
+			init_pd(vm, pd, vm->scratch_pt);
 
 			spin_lock(&pdp->lock);
 			if (pdp->entry[pdpe] == vm->scratch_pd) {
-				gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-				pdp->entry[pdpe] = pd;
-				atomic_inc(&pdp->used);
+				set_pd_entry(pdp, pdpe, pd);
 			} else {
 				alloc = pd;
 				pd = pdp->entry[pdpe];
@@ -1441,14 +1449,8 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	goto out;
 
 unwind_pd:
-	spin_lock(&pdp->lock);
-	if (atomic_dec_and_test(&pd->used)) {
-		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
-		GEM_BUG_ON(!atomic_read(&pdp->used));
-		atomic_dec(&pdp->used);
+	if (release_pd_entry(pdp, pdpe, &pd->used, px_base(vm->scratch_pd)))
 		free_pd(vm, pd);
-	}
-	spin_unlock(&pdp->lock);
 unwind:
 	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
 out:
@@ -1491,8 +1493,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 			spin_lock(&pml4->lock);
 			if (pml4->entry[pml4e] == vm->scratch_pdp) {
-				gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
-				pml4->entry[pml4e] = pdp;
+				set_pd_entry(pml4, pml4e, pdp);
 			} else {
 				alloc = pdp;
 				pdp = pml4->entry[pml4e];
@@ -1512,12 +1513,8 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	goto out;
 
 unwind_pdp:
-	spin_lock(&pml4->lock);
-	if (atomic_dec_and_test(&pdp->used)) {
-		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+	if (release_pd_entry(pml4, pml4e, &pdp->used, px_base(vm->scratch_pdp)))
 		free_pd(vm, pdp);
-	}
-	spin_unlock(&pml4->lock);
 unwind:
 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
 out:
@@ -1540,29 +1537,23 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 		if (IS_ERR(pd))
 			goto unwind;
 
-		init_pd_with_page(vm, pd, vm->scratch_pt);
-		gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-
-		atomic_inc(&pdp->used);
+		init_pd(vm, pd, vm->scratch_pt);
+		set_pd_entry(pdp, pdpe, pd);
 	}
 
-	atomic_inc(&pdp->used); /* never remove */
-
 	return 0;
 
 unwind:
-	start -= from;
-	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
-		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
-		free_pd(vm, pd);
-	}
+	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
 	atomic_set(&pdp->used, 0);
 	return -ENOMEM;
 }
 
-static void ppgtt_init(struct drm_i915_private *i915,
-		       struct i915_ppgtt *ppgtt)
+static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
+
+	ppgtt->vm.gt = gt;
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.dma = &i915->drm.pdev->dev;
 	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
@@ -1575,6 +1566,50 @@ static void ppgtt_init(struct drm_i915_private *i915,
 	ppgtt->vm.vma_ops.clear_pages = clear_pages;
 }
 
+static void init_pd_n(struct i915_address_space *vm,
+		      struct i915_page_directory *pd,
+		      struct i915_page_directory *to,
+		      const unsigned int entries)
+{
+	const u64 daddr = gen8_pde_encode(px_dma(to), I915_CACHE_LLC);
+	u64 * const vaddr = kmap_atomic(pd->base.page);
+
+	memset64(vaddr, daddr, entries);
+	kunmap_atomic(vaddr);
+
+	memset_p(pd->entry, to, entries);
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+	struct i915_page_directory *pd;
+
+	if (i915_vm_is_4lvl(vm)) {
+		pd = alloc_pd(vm);
+		if (!IS_ERR(pd))
+			init_pd(vm, pd, vm->scratch_pdp);
+
+		return pd;
+	}
+
+	/* 3lvl */
+	pd = __alloc_pd();
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	pd->entry[GEN8_3LVL_PDPES] = NULL;
+
+	if (unlikely(setup_page_dma(vm, &pd->base))) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init_pd_n(vm, pd, vm->scratch_pd, GEN8_3LVL_PDPES);
+
+	return pd;
+}
+
 /*
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1591,7 +1626,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ppgtt_init(i915, ppgtt);
+	ppgtt_init(ppgtt, &i915->gt);
 
 	/*
 	 * From bdw, there is hw support for read-only pages in the PPGTT.
@@ -1611,34 +1646,21 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (err)
 		goto err_free;
 
-	ppgtt->pd = __alloc_pd();
-	if (!ppgtt->pd) {
-		err = -ENOMEM;
+	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+	if (IS_ERR(ppgtt->pd)) {
+		err = PTR_ERR(ppgtt->pd);
 		goto err_free_scratch;
 	}
 
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
-		err = setup_px(&ppgtt->vm, ppgtt->pd);
-		if (err)
-			goto err_free_pdp;
-
-		init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
-
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
 		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
 		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
 	} else {
-		/*
-		 * We don't need to setup dma for top level pdp, only
-		 * for entries. So point entries to scratch.
-		 */
-		memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
-			 GEN8_3LVL_PDPES);
-
 		if (intel_vgpu_active(i915)) {
 			err = gen8_preallocate_top_level_pdp(ppgtt);
 			if (err)
-				goto err_free_pdp;
+				goto err_free_pd;
 		}
 
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1653,7 +1675,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 
 	return ppgtt;
 
-err_free_pdp:
+err_free_pd:
 	free_pd(&ppgtt->vm, ppgtt->pd);
 err_free_scratch:
 	gen8_free_scratch(&ppgtt->vm);
@@ -1672,25 +1694,26 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
 		  ppgtt->pd_addr + pde);
 }
 
-static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen7_ppgtt_enable(struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
-	u32 ecochk, ecobits;
 	enum intel_engine_id id;
+	u32 ecochk;
 
-	ecobits = I915_READ(GAC_ECO_BITS);
-	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
 
-	ecochk = I915_READ(GAM_ECOCHK);
-	if (IS_HASWELL(dev_priv)) {
+	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+	if (IS_HASWELL(i915)) {
 		ecochk |= ECOCHK_PPGTT_WB_HSW;
 	} else {
 		ecochk |= ECOCHK_PPGTT_LLC_IVB;
 		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 	}
-	I915_WRITE(GAM_ECOCHK, ecochk);
+	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
 
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
 		/* GFX_MODE is per-ring on gen7+ */
 		ENGINE_WRITE(engine,
 			     RING_MODE_GEN7,
@@ -1698,22 +1721,29 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen6_ppgtt_enable(struct intel_gt *gt)
 {
-	u32 ecochk, gab_ctl, ecobits;
+	struct intel_uncore *uncore = gt->uncore;
 
-	ecobits = I915_READ(GAC_ECO_BITS);
-	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
-		   ECOBITS_PPGTT_CACHE64B);
+	intel_uncore_rmw(uncore,
+			 GAC_ECO_BITS,
+			 0,
+			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
 
-	gab_ctl = I915_READ(GAB_CTL);
-	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+	intel_uncore_rmw(uncore,
+			 GAB_CTL,
+			 0,
+			 GAB_CTL_CONT_AFTER_PAGEFAULT);
 
-	ecochk = I915_READ(GAM_ECOCHK);
-	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+	intel_uncore_rmw(uncore,
+			 GAM_ECOCHK,
+			 0,
+			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
-	if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
-		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+		intel_uncore_write(uncore,
+				   GFX_MODE,
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 }
 
 /* PPGTT support for Sandybridge/Gen6 and later */
@@ -1848,7 +1878,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 
 	if (flush) {
 		mark_tlbs_dirty(&ppgtt->base);
-		gen6_ggtt_invalidate(vm->i915);
+		gen6_ggtt_invalidate(vm->gt->ggtt);
 	}
 
 	goto out;
@@ -1909,62 +1939,15 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
 			free_pt(&ppgtt->base.vm, pt);
 }
 
-struct gen6_ppgtt_cleanup_work {
-	struct work_struct base;
-	struct i915_vma *vma;
-};
-
-static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
-{
-	struct gen6_ppgtt_cleanup_work *work =
-		container_of(wrk, typeof(*work), base);
-	/* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
-	struct drm_i915_private *i915 = work->vma->vm->i915;
-
-	mutex_lock(&i915->drm.struct_mutex);
-	i915_vma_destroy(work->vma);
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	kfree(work);
-}
-
-static int nop_set_pages(struct i915_vma *vma)
-{
-	return -ENODEV;
-}
-
-static void nop_clear_pages(struct i915_vma *vma)
-{
-}
-
-static int nop_bind(struct i915_vma *vma,
-		    enum i915_cache_level cache_level,
-		    u32 unused)
-{
-	return -ENODEV;
-}
-
-static void nop_unbind(struct i915_vma *vma)
-{
-}
-
-static const struct i915_vma_ops nop_vma_ops = {
-	.set_pages = nop_set_pages,
-	.clear_pages = nop_clear_pages,
-	.bind_vma = nop_bind,
-	.unbind_vma = nop_unbind,
-};
-
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
-	struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
+	struct drm_i915_private *i915 = vm->i915;
 
 	/* FIXME remove the struct_mutex to bring the locking under control */
-	INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
-	work->vma = ppgtt->vma;
-	work->vma->ops = &nop_vma_ops;
-	schedule_work(&work->base);
+	mutex_lock(&i915->drm.struct_mutex);
+	i915_vma_destroy(ppgtt->vma);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	gen6_ppgtt_free_pd(ppgtt);
 	gen6_ppgtt_free_scratch(vm);
@@ -2001,7 +1984,7 @@ static int pd_vma_bind(struct i915_vma *vma,
 		gen6_write_pde(ppgtt, pde, pt);
 
 	mark_tlbs_dirty(&ppgtt->base);
-	gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+	gen6_ggtt_invalidate(ggtt);
 
 	return 0;
 }
@@ -2039,7 +2022,7 @@ static const struct i915_vma_ops pd_vma_ops = {
 static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 {
 	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -2049,7 +2032,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(i915, &vma->active, NULL);
+	i915_active_init(i915, &vma->active, NULL, NULL);
 	INIT_ACTIVE_REQUEST(&vma->last_fence);
 
 	vma->vm = &ggtt->vm;
@@ -2137,7 +2120,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ppgtt_init(i915, &ppgtt->base);
+	ppgtt_init(&ppgtt->base, &i915->gt);
 
 	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
 	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
@@ -2146,16 +2129,10 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 
 	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
 
-	ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
-	if (!ppgtt->work) {
-		err = -ENOMEM;
-		goto err_free;
-	}
-
 	ppgtt->base.pd = __alloc_pd();
 	if (!ppgtt->base.pd) {
 		err = -ENOMEM;
-		goto err_work;
+		goto err_free;
 	}
 
 	err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2174,28 +2151,37 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 	gen6_ppgtt_free_scratch(&ppgtt->base.vm);
 err_pd:
 	kfree(ppgtt->base.pd);
-err_work:
-	kfree(ppgtt->work);
 err_free:
 	kfree(ppgtt);
 	return ERR_PTR(err);
 }
 
-static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+static void gtt_write_workarounds(struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+
 	/* This function is for GTT-related workarounds. It is called on
 	 * driver load and after a GPU reset, so workarounds can be placed
 	 * here even if they get overwritten by a GPU reset.
 	 */
 	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
-	if (IS_BROADWELL(dev_priv))
-		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
-	else if (IS_CHERRYVIEW(dev_priv))
-		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_GEN9_LP(dev_priv))
-		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
-	else if (INTEL_GEN(dev_priv) >= 9)
-		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+	if (IS_BROADWELL(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+	else if (IS_CHERRYVIEW(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+	else if (IS_GEN9_LP(i915))
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+	else if (INTEL_GEN(i915) >= 9)
+		intel_uncore_write(uncore,
+				   GEN8_L3_LRA_1_GPGPU,
+				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
 
 	/*
 	 * To support 64K PTEs we need to first enable the use of the
@@ -2208,21 +2194,24 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
 	 * 32K pages, but we don't currently have any support for it in our
 	 * driver.
 	 */
-	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
-	    INTEL_GEN(dev_priv) <= 10)
-		I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
-			   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
-			   GAMW_ECO_ENABLE_64K_IPS_FIELD);
+	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+	    INTEL_GEN(i915) <= 10)
+		intel_uncore_rmw(uncore,
+				 GEN8_GAMW_ECO_DEV_RW_IA,
+				 0,
+				 GAMW_ECO_ENABLE_64K_IPS_FIELD);
 }
 
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ppgtt_init_hw(struct intel_gt *gt)
 {
-	gtt_write_workarounds(dev_priv);
+	struct drm_i915_private *i915 = gt->i915;
 
-	if (IS_GEN(dev_priv, 6))
-		gen6_ppgtt_enable(dev_priv);
-	else if (IS_GEN(dev_priv, 7))
-		gen7_ppgtt_enable(dev_priv);
+	gtt_write_workarounds(gt);
+
+	if (IS_GEN(i915, 6))
+		gen6_ppgtt_enable(gt);
+	else if (IS_GEN(i915, 7))
+		gen7_ppgtt_enable(gt);
 
 	return 0;
 }
@@ -2250,42 +2239,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
 	return ppgtt;
 }
 
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
-{
-	struct list_head *phases[] = {
-		&vm->bound_list,
-		&vm->unbound_list,
-		NULL,
-	}, **phase;
-
-	vm->closed = true;
-	for (phase = phases; *phase; phase++) {
-		struct i915_vma *vma, *vn;
-
-		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			i915_vma_destroy(vma);
-	}
-}
-
-void i915_vm_release(struct kref *kref)
-{
-	struct i915_address_space *vm =
-		container_of(kref, struct i915_address_space, ref);
-
-	GEM_BUG_ON(i915_is_ggtt(vm));
-	trace_i915_ppgtt_release(vm);
-
-	ppgtt_destroy_vma(vm);
-
-	GEM_BUG_ON(!list_empty(&vm->bound_list));
-	GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
-	vm->cleanup(vm);
-	i915_address_space_fini(vm);
-
-	kfree(vm);
-}
-
 /* Certain Gen5 chipsets require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -2297,21 +2250,26 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
 	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
 }
 
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *i915 = ggtt->vm.i915;
 
 	/* Don't bother messing with faults pre GEN6 as we have little
 	 * documentation supporting that it's a good idea.
 	 */
-	if (INTEL_GEN(dev_priv) < 6)
+	if (INTEL_GEN(i915) < 6)
 		return;
 
-	i915_check_and_clear_faults(dev_priv);
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
-	i915_ggtt_invalidate(dev_priv);
+	ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+	ggtt_suspend_mappings(&i915->ggtt);
 }
 
 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2357,7 +2315,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 
 	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
 
-	ggtt->invalidate(vm->i915);
+	ggtt->invalidate(ggtt);
 }
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2385,7 +2343,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 * We want to flush the TLBs only after we're certain all the PTE
 	 * updates have finished.
 	 */
-	ggtt->invalidate(vm->i915);
+	ggtt->invalidate(ggtt);
 }
 
 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
@@ -2400,7 +2358,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 
 	iowrite32(vm->pte_encode(addr, level, flags), pte);
 
-	ggtt->invalidate(vm->i915);
+	ggtt->invalidate(ggtt);
 }
 
 /*
@@ -2426,7 +2384,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 * We want to flush the TLBs only after we're certain all the PTE
 	 * updates have finished.
 	 */
-	ggtt->invalidate(vm->i915);
+	ggtt->invalidate(ggtt);
 }
 
 static void nop_clear_range(struct i915_address_space *vm,
@@ -2794,14 +2752,19 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct i915_ppgtt *ppgtt;
 
+	mutex_lock(&i915->drm.struct_mutex);
+
 	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
 	if (!ppgtt)
-		return;
+		goto out;
 
 	i915_vm_put(&ppgtt->vm);
 
 	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
 	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+
+out:
+	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2830,7 +2793,13 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 		drm_mm_remove_node(&ggtt->uc_fw);
 }
 
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+	ggtt_release_guc_top(ggtt);
+	drm_mm_remove_node(&ggtt->error_capture);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
 {
 	/* Let GEM Manage all of the aperture.
 	 *
@@ -2841,7 +2810,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 * aperture.  One page should be enough to keep any prefetching inside
 	 * of the aperture.
 	 */
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned long hole_start, hole_end;
 	struct drm_mm_node *entry;
 	int ret;
@@ -2853,9 +2821,9 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 * why.
 	 */
 	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
-			       intel_wopcm_guc_size(&dev_priv->wopcm));
+			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
 
-	ret = intel_vgt_balloon(dev_priv);
+	ret = intel_vgt_balloon(ggtt);
 	if (ret)
 		return ret;
 
@@ -2874,7 +2842,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	 */
 	ret = ggtt_reserve_guc_top(ggtt);
 	if (ret)
-		goto err_reserve;
+		goto err;
 
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
@@ -2887,35 +2855,38 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 	/* And finally clear the reserved guard page */
 	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
-	if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
-		ret = init_aliasing_ppgtt(dev_priv);
-		if (ret)
-			goto err_appgtt;
-	}
-
 	return 0;
 
-err_appgtt:
-	ggtt_release_guc_top(ggtt);
-err_reserve:
-	drm_mm_remove_node(&ggtt->error_capture);
+err:
+	cleanup_init_ggtt(ggtt);
 	return ret;
 }
 
-/**
- * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
- */
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
+int i915_init_ggtt(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	int ret;
+
+	ret = init_ggtt(&i915->ggtt);
+	if (ret)
+		return ret;
+
+	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+		ret = init_aliasing_ppgtt(i915);
+		if (ret)
+			cleanup_init_ggtt(&i915->ggtt);
+	}
+
+	return 0;
+}
+
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
 	struct i915_vma *vma, *vn;
-	struct pagevec *pvec;
 
 	ggtt->vm.closed = true;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	fini_aliasing_ppgtt(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
 
 	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
@@ -2926,24 +2897,37 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	ggtt_release_guc_top(ggtt);
 
 	if (drm_mm_initialized(&ggtt->vm.mm)) {
-		intel_vgt_deballoon(dev_priv);
+		intel_vgt_deballoon(ggtt);
 		i915_address_space_fini(&ggtt->vm);
 	}
 
 	ggtt->vm.cleanup(&ggtt->vm);
 
-	pvec = &dev_priv->mm.wc_stash.pvec;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	arch_phys_wc_del(ggtt->mtrr);
+	io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_cleanup_hw(struct drm_i915_private *i915)
+{
+	struct pagevec *pvec;
+
+	fini_aliasing_ppgtt(i915);
+
+	ggtt_cleanup_hw(&i915->ggtt);
+
+	pvec = &i915->mm.wc_stash.pvec;
 	if (pvec->nr) {
 		set_pages_array_wb(pvec->pages, pvec->nr);
 		__pagevec_release(pvec);
 	}
 
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_fini(&ggtt->iomap);
-
-	i915_gem_cleanup_stolen(dev_priv);
+	i915_gem_cleanup_stolen(i915);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3021,236 +3005,41 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 	return 0;
 }
 
-static struct intel_ppat_entry *
-__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
+static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
 {
-	struct intel_ppat_entry *entry = &ppat->entries[index];
-
-	GEM_BUG_ON(index >= ppat->max_entries);
-	GEM_BUG_ON(test_bit(index, ppat->used));
-
-	entry->ppat = ppat;
-	entry->value = value;
-	kref_init(&entry->ref);
-	set_bit(index, ppat->used);
-	set_bit(index, ppat->dirty);
-
-	return entry;
-}
-
-static void __free_ppat_entry(struct intel_ppat_entry *entry)
-{
-	struct intel_ppat *ppat = entry->ppat;
-	unsigned int index = entry - ppat->entries;
-
-	GEM_BUG_ON(index >= ppat->max_entries);
-	GEM_BUG_ON(!test_bit(index, ppat->used));
-
-	entry->value = ppat->clear_value;
-	clear_bit(index, ppat->used);
-	set_bit(index, ppat->dirty);
-}
-
-/**
- * intel_ppat_get - get a usable PPAT entry
- * @i915: i915 device instance
- * @value: the PPAT value required by the caller
- *
- * The function tries to search if there is an existing PPAT entry which
- * matches with the required value. If perfectly matched, the existing PPAT
- * entry will be used. If only partially matched, it will try to check if
- * there is any available PPAT index. If yes, it will allocate a new PPAT
- * index for the required entry and update the HW. If not, the partially
- * matched entry will be used.
- */
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value)
-{
-	struct intel_ppat *ppat = &i915->ppat;
-	struct intel_ppat_entry *entry = NULL;
-	unsigned int scanned, best_score;
-	int i;
-
-	GEM_BUG_ON(!ppat->max_entries);
-
-	scanned = best_score = 0;
-	for_each_set_bit(i, ppat->used, ppat->max_entries) {
-		unsigned int score;
-
-		score = ppat->match(ppat->entries[i].value, value);
-		if (score > best_score) {
-			entry = &ppat->entries[i];
-			if (score == INTEL_PPAT_PERFECT_MATCH) {
-				kref_get(&entry->ref);
-				return entry;
-			}
-			best_score = score;
-		}
-		scanned++;
-	}
-
-	if (scanned == ppat->max_entries) {
-		if (!entry)
-			return ERR_PTR(-ENOSPC);
-
-		kref_get(&entry->ref);
-		return entry;
-	}
-
-	i = find_first_zero_bit(ppat->used, ppat->max_entries);
-	entry = __alloc_ppat_entry(ppat, i, value);
-	ppat->update_hw(i915);
-	return entry;
-}
-
-static void release_ppat(struct kref *kref)
-{
-	struct intel_ppat_entry *entry =
-		container_of(kref, struct intel_ppat_entry, ref);
-	struct drm_i915_private *i915 = entry->ppat->i915;
-
-	__free_ppat_entry(entry);
-	entry->ppat->update_hw(i915);
-}
-
-/**
- * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
- * @entry: an intel PPAT entry
- *
- * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
- * entry is dynamically allocated, its reference count will be decreased. Once
- * the reference count becomes into zero, the PPAT index becomes free again.
- */
-void intel_ppat_put(const struct intel_ppat_entry *entry)
-{
-	struct intel_ppat *ppat = entry->ppat;
-	unsigned int index = entry - ppat->entries;
-
-	GEM_BUG_ON(!ppat->max_entries);
-
-	kref_put(&ppat->entries[index].ref, release_ppat);
-}
-
-static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
-	struct intel_ppat *ppat = &dev_priv->ppat;
-	int i;
-
-	for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
-		I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
-		clear_bit(i, ppat->dirty);
-	}
-}
-
-static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
-	struct intel_ppat *ppat = &dev_priv->ppat;
-	u64 pat = 0;
-	int i;
-
-	for (i = 0; i < ppat->max_entries; i++)
-		pat |= GEN8_PPAT(i, ppat->entries[i].value);
-
-	bitmap_clear(ppat->dirty, 0, ppat->max_entries);
-
-	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
-	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
-}
-
-static unsigned int bdw_private_pat_match(u8 src, u8 dst)
-{
-	unsigned int score = 0;
-	enum {
-		AGE_MATCH = BIT(0),
-		TC_MATCH = BIT(1),
-		CA_MATCH = BIT(2),
-	};
-
-	/* Cache attribute has to be matched. */
-	if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
-		return 0;
-
-	score |= CA_MATCH;
-
-	if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
-		score |= TC_MATCH;
-
-	if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
-		score |= AGE_MATCH;
-
-	if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
-		return INTEL_PPAT_PERFECT_MATCH;
-
-	return score;
-}
-
-static unsigned int chv_private_pat_match(u8 src, u8 dst)
-{
-	return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
-		INTEL_PPAT_PERFECT_MATCH : 0;
-}
-
-static void cnl_setup_private_ppat(struct intel_ppat *ppat)
-{
-	ppat->max_entries = 8;
-	ppat->update_hw = cnl_private_pat_update_hw;
-	ppat->match = bdw_private_pat_match;
-	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
-	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
-	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
-	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
-	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
-	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
-	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
-	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
-	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+	I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
+	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
+	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
  * bits. When using advanced contexts each context stores its own PAT, but
  * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_ppat *ppat)
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
 {
-	ppat->max_entries = 8;
-	ppat->update_hw = bdw_private_pat_update_hw;
-	ppat->match = bdw_private_pat_match;
-	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
+	u64 pat;
 
-	if (!HAS_PPGTT(ppat->i915)) {
-		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
-		 * so RTL will always use the value corresponding to
-		 * pat_sel = 000".
-		 * So let's disable cache for GGTT to avoid screen corruptions.
-		 * MOCS still can be used though.
-		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
-		 * before this patch, i.e. the same uncached + snooping access
-		 * like on gen6/7 seems to be in effect.
-		 * - So this just fixes blitter/render access. Again it looks
-		 * like it's not just uncached access, but uncached + snooping.
-		 * So we can still hold onto all our assumptions wrt cpu
-		 * clflushing on LLC machines.
-		 */
-		__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
-		return;
-	}
+	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
+	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
+	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
+	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
+	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
-	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
-	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
-	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
-	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
-	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
-	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
-	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
-	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
 }
 
-static void chv_setup_private_ppat(struct intel_ppat *ppat)
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
 {
-	ppat->max_entries = 8;
-	ppat->update_hw = bdw_private_pat_update_hw;
-	ppat->match = chv_private_pat_match;
-	ppat->clear_value = CHV_PPAT_SNOOP;
+	u64 pat;
 
 	/*
 	 * Map WB on BDW to snooped on CHV.
@@ -3271,14 +3060,17 @@ static void chv_setup_private_ppat(struct intel_ppat *ppat)
 	 * in order to keep the global status page working.
 	 */
 
-	__alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
-	__alloc_ppat_entry(ppat, 1, 0);
-	__alloc_ppat_entry(ppat, 2, 0);
-	__alloc_ppat_entry(ppat, 3, 0);
-	__alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
-	__alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
-	__alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
-	__alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
+	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(1, 0) |
+	      GEN8_PPAT(2, 0) |
+	      GEN8_PPAT(3, 0) |
+	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+	      GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
 }
 
 static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -3291,27 +3083,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 
 static void setup_private_pat(struct drm_i915_private *dev_priv)
 {
-	struct intel_ppat *ppat = &dev_priv->ppat;
-	int i;
-
-	ppat->i915 = dev_priv;
+	GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
 
 	if (INTEL_GEN(dev_priv) >= 10)
-		cnl_setup_private_ppat(ppat);
+		cnl_setup_private_ppat(dev_priv);
 	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
-		chv_setup_private_ppat(ppat);
+		chv_setup_private_ppat(dev_priv);
 	else
-		bdw_setup_private_ppat(ppat);
-
-	GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
-
-	for_each_clear_bit(i, ppat->used, ppat->max_entries) {
-		ppat->entries[i].value = ppat->clear_value;
-		ppat->entries[i].ppat = ppat;
-		set_bit(i, ppat->dirty);
-	}
-
-	ppat->update_hw(dev_priv);
+		bdw_setup_private_ppat(dev_priv);
 }
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3478,21 +3257,18 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	return 0;
 }
 
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_private *i915 = gt->i915;
 	int ret;
 
-	ggtt->vm.i915 = dev_priv;
-	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
+	ggtt->vm.gt = gt;
+	ggtt->vm.i915 = i915;
+	ggtt->vm.dma = &i915->drm.pdev->dev;
 
-	if (INTEL_GEN(dev_priv) <= 5)
+	if (INTEL_GEN(i915) <= 5)
 		ret = i915_gmch_probe(ggtt);
-	else if (INTEL_GEN(dev_priv) < 8)
+	else if (INTEL_GEN(i915) < 8)
 		ret = gen6_gmch_probe(ggtt);
 	else
 		ret = gen8_gmch_probe(ggtt);
@@ -3520,19 +3296,69 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
 			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+
+	return 0;
+}
+
+/**
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
+ */
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
+{
+	int ret;
+
+	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+	if (ret)
+		return ret;
+
 	if (intel_vtd_active())
 		DRM_INFO("VT-d active for gfx access\n");
 
 	return 0;
 }
 
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	int ret = 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+	ggtt->vm.is_ggtt = true;
+
+	/* Only VLV supports read-only GGTT mappings */
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
+
+	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
+		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
+
+	if (!io_mapping_init_wc(&ggtt->iomap,
+				ggtt->gmadr.start,
+				ggtt->mappable_end)) {
+		ggtt->vm.cleanup(&ggtt->vm);
+		ret = -EIO;
+		goto out;
+	}
+
+	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+
+	i915_ggtt_init_fences(ggtt);
+
+out:
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return ret;
+}
+
 /**
  * i915_ggtt_init_hw - Initialize GGTT hardware
  * @dev_priv: i915 device
  */
 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
 	stash_init(&dev_priv->mm.wc_stash);
@@ -3542,28 +3368,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	 * beyond the end of the batch buffer, across the page boundary,
 	 * and beyond the end of the GTT if we do not provide a guard.
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
-	ggtt->vm.is_ggtt = true;
-
-	/* Only VLV supports read-only GGTT mappings */
-	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
-
-	if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
-		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
-				dev_priv->ggtt.gmadr.start,
-				dev_priv->ggtt.mappable_end)) {
-		ret = -EIO;
-		goto out_gtt_cleanup;
-	}
-
-	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
-
-	i915_ggtt_init_fences(ggtt);
+	ret = ggtt_init_hw(&dev_priv->ggtt);
+	if (ret)
+		return ret;
 
 	/*
 	 * Initialise stolen early so that we may reserve preallocated
@@ -3576,7 +3383,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	return 0;
 
 out_gtt_cleanup:
-	ggtt->vm.cleanup(&ggtt->vm);
+	dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
 	return ret;
 }
 
@@ -3590,33 +3397,36 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
 
 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 {
-	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
+	struct i915_ggtt *ggtt = &i915->ggtt;
 
-	i915->ggtt.invalidate = guc_ggtt_invalidate;
+	GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
 
-	i915_ggtt_invalidate(i915);
+	ggtt->invalidate = guc_ggtt_invalidate;
+
+	ggtt->invalidate(ggtt);
 }
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
 	/* XXX Temporary pardon for error unload */
-	if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
+	if (ggtt->invalidate == gen6_ggtt_invalidate)
 		return;
 
 	/* We should only be called after i915_ggtt_enable_guc() */
-	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
+	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
 
-	i915->ggtt.invalidate = gen6_ggtt_invalidate;
+	ggtt->invalidate = gen6_ggtt_invalidate;
 
-	i915_ggtt_invalidate(i915);
+	ggtt->invalidate(ggtt);
 }
 
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma, *vn;
 
-	i915_check_and_clear_faults(dev_priv);
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 
 	mutex_lock(&ggtt->vm.mutex);
 
@@ -3650,17 +3460,17 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	}
 
 	ggtt->vm.closed = false;
-	i915_ggtt_invalidate(dev_priv);
+	ggtt->invalidate(ggtt);
 
 	mutex_unlock(&ggtt->vm.mutex);
+}
 
-	if (INTEL_GEN(dev_priv) >= 8) {
-		struct intel_ppat *ppat = &dev_priv->ppat;
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+	ggtt_restore_mappings(&i915->ggtt);
 
-		bitmap_set(ppat->dirty, 0, ppat->max_entries);
-		dev_priv->ppat.update_hw(dev_priv);
-		return;
-	}
+	if (INTEL_GEN(i915) >= 8)
+		setup_private_pat(i915);
 }
 
 static struct scatterlist *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 812717cc..57a68ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,15 +35,19 @@
 #define __I915_GEM_GTT_H__
 
 #include <linux/io-mapping.h>
+#include <linux/kref.h>
 #include <linux/mm.h>
 #include <linux/pagevec.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
 
 #include "gt/intel_reset.h"
 #include "i915_gem_fence_reg.h"
 #include "i915_request.h"
 #include "i915_scatterlist.h"
 #include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
 
 #define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
 #define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
@@ -64,12 +68,10 @@
 struct drm_i915_file_private;
 struct drm_i915_gem_object;
 struct i915_vma;
+struct intel_gt;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
 
 #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
@@ -155,11 +157,6 @@ typedef u64 gen8_ppgtt_pml4e_t;
 #define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
 #define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
 
-#define GEN8_PPAT_GET_CA(x) ((x) & 3)
-#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
-#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
-#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
-
 #define GEN8_PDE_IPS_64K BIT(11)
 #define GEN8_PDE_PS_2M   BIT(7)
 
@@ -280,8 +277,10 @@ struct pagestash {
 
 struct i915_address_space {
 	struct kref ref;
+	struct rcu_work rcu;
 
 	struct drm_mm mm;
+	struct intel_gt *gt;
 	struct drm_i915_private *i915;
 	struct device *dma;
 	/* Every address space belongs to a struct file - except for the global
@@ -386,7 +385,7 @@ struct i915_ggtt {
 
 	/** "Graphics Stolen Memory" holds the global PTEs */
 	void __iomem *gsm;
-	void (*invalidate)(struct drm_i915_private *dev_priv);
+	void (*invalidate)(struct i915_ggtt *ggtt);
 
 	bool do_idle_maps;
 
@@ -425,8 +424,6 @@ struct gen6_ppgtt {
 
 	unsigned int pin_count;
 	bool scan_for_unused_pt;
-
-	struct gen6_ppgtt_cleanup_work *work;
 };
 
 #define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
@@ -614,46 +611,15 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 	return container_of(vm, struct i915_ppgtt, vm);
 }
 
-#define INTEL_MAX_PPAT_ENTRIES 8
-#define INTEL_PPAT_PERFECT_MATCH (~0U)
-
-struct intel_ppat;
-
-struct intel_ppat_entry {
-	struct intel_ppat *ppat;
-	struct kref ref;
-	u8 value;
-};
-
-struct intel_ppat {
-	struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
-	DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
-	DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
-	unsigned int max_entries;
-	u8 clear_value;
-	/*
-	 * Return a score to show how two PPAT values match,
-	 * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match
-	 */
-	unsigned int (*match)(u8 src, u8 dst);
-	void (*update_hw)(struct drm_i915_private *i915);
-
-	struct drm_i915_private *i915;
-};
-
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value);
-void intel_ppat_put(const struct intel_ppat_entry *entry);
-
 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
 void i915_ggtt_enable_guc(struct drm_i915_private *i915);
 void i915_ggtt_disable_guc(struct drm_i915_private *i915);
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
+int i915_init_ggtt(struct drm_i915_private *dev_priv);
 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ppgtt_init_hw(struct intel_gt *gt);
 
 struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
deleted file mode 100644
index 112cda8..0000000
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _I915_GEM_RENDER_STATE_H_
-#define _I915_GEM_RENDER_STATE_H_
-
-struct i915_request;
-
-int i915_gem_render_state_emit(struct i915_request *rq);
-
-#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 04c1ce1..2d199f4 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -7,6 +7,8 @@
 #ifndef _I915_GLOBALS_H_
 #define _I915_GLOBALS_H_
 
+#include <linux/types.h>
+
 typedef void (*i915_global_func_t)(void);
 
 struct i915_global {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b7e9fdd..5489cd8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1248,10 +1248,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	}
 }
 
-static void record_request(struct i915_request *request,
+static void record_request(const struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	struct i915_gem_context *ctx = request->gem_context;
+	const struct i915_gem_context *ctx = request->gem_context;
 
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
@@ -1315,20 +1315,15 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 	ee->num_requests = count;
 }
 
-static void error_record_engine_execlists(struct intel_engine_cs *engine,
+static void error_record_engine_execlists(const struct intel_engine_cs *engine,
 					  struct drm_i915_error_engine *ee)
 {
 	const struct intel_engine_execlists * const execlists = &engine->execlists;
-	unsigned int n;
+	struct i915_request * const *port = execlists->active;
+	unsigned int n = 0;
 
-	for (n = 0; n < execlists_num_ports(execlists); n++) {
-		struct i915_request *rq = port_request(&execlists->port[n]);
-
-		if (!rq)
-			break;
-
-		record_request(rq, &ee->execlist[n]);
-	}
+	while (*port)
+		record_request(*port++, &ee->execlist[n++]);
 
 	ee->num_ports = n;
 }
@@ -1410,7 +1405,6 @@ capture_object(struct drm_i915_private *dev_priv,
 static void gem_record_rings(struct i915_gpu_state *error)
 {
 	struct drm_i915_private *i915 = error->i915;
-	struct i915_ggtt *ggtt = &i915->ggtt;
 	int i;
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -1433,7 +1427,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = ctx->vm ?: &ggtt->vm;
+			ee->vm = ctx->vm ?: &engine->gt->ggtt->vm;
 
 			record_context(&ee->context, ctx);
 
@@ -1446,8 +1440,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			if (HAS_BROKEN_CS_TLB(i915))
 				ee->wa_batchbuffer =
-					i915_error_object_create(i915,
-								 i915->gt.scratch);
+				  i915_error_object_create(i915,
+							   engine->gt->scratch);
 			request_record_user_bo(request, ee);
 
 			ee->ctx =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b2e27b5..7c5ba5c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -305,17 +305,17 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 }
 
 static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
+gen11_gt_engine_identity(struct intel_gt *gt,
 			 const unsigned int bank, const unsigned int bit);
 
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+static bool gen11_reset_one_iir(struct intel_gt *gt,
 				const unsigned int bank,
 				const unsigned int bit)
 {
-	void __iomem * const regs = i915->uncore.regs;
+	void __iomem * const regs = gt->uncore->regs;
 	u32 dw;
 
-	lockdep_assert_held(&i915->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
 	if (dw & BIT(bit)) {
@@ -323,7 +323,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
 		 * According to the BSpec, DW_IIR bits cannot be cleared without
 		 * first servicing the Selector & Shared IIR registers.
 		 */
-		gen11_gt_engine_identity(i915, bank, bit);
+		gen11_gt_engine_identity(gt, bank, bit);
 
 		/*
 		 * We locked GT INT DW by reading it. If we want to (try
@@ -409,50 +409,54 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
-static void write_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 mask = gt->pm_imr;
 	i915_reg_t reg;
-	u32 mask = dev_priv->pm_imr;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
 		/* pm is in upper half */
 		mask = mask << 16;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+	} else if (INTEL_GEN(i915) >= 8) {
 		reg = GEN8_GT_IMR(2);
 	} else {
 		reg = GEN6_PMIMR;
 	}
 
-	I915_WRITE(reg, mask);
-	POSTING_READ(reg);
+	intel_uncore_write(uncore, reg, mask);
+	intel_uncore_posting_read(uncore, reg);
 }
 
-static void write_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct intel_gt *gt)
 {
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
+	u32 mask = gt->pm_ier;
 	i915_reg_t reg;
-	u32 mask = dev_priv->pm_ier;
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
 		/* pm is in upper half */
 		mask = mask << 16;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
+	} else if (INTEL_GEN(i915) >= 8) {
 		reg = GEN8_GT_IER(2);
 	} else {
 		reg = GEN6_PMIER;
 	}
 
-	I915_WRITE(reg, mask);
+	intel_uncore_write(uncore, reg, mask);
 }
 
 /**
  * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
+ * @gt: gt for the interrupts
  * @interrupt_mask: mask of interrupt bits to update
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct intel_gt *gt,
 			      u32 interrupt_mask,
 			      u32 enabled_irq_mask)
 {
@@ -460,37 +464,37 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
-	new_val = dev_priv->pm_imr;
+	new_val = gt->pm_imr;
 	new_val &= ~interrupt_mask;
 	new_val |= (~enabled_irq_mask & interrupt_mask);
 
-	if (new_val != dev_priv->pm_imr) {
-		dev_priv->pm_imr = new_val;
-		write_pm_imr(dev_priv);
+	if (new_val != gt->pm_imr) {
+		gt->pm_imr = new_val;
+		write_pm_imr(gt);
 	}
 }
 
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
 {
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
 		return;
 
-	snb_update_pm_irq(dev_priv, mask, mask);
+	snb_update_pm_irq(gt, mask, mask);
 }
 
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
 {
-	snb_update_pm_irq(dev_priv, mask, 0);
+	snb_update_pm_irq(gt, mask, 0);
 }
 
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
 {
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
 		return;
 
-	__gen6_mask_pm_irq(dev_priv, mask);
+	__gen6_mask_pm_irq(gt, mask);
 }
 
 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
@@ -504,23 +508,23 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
 	POSTING_READ(reg);
 }
 
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
 {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
-	dev_priv->pm_ier |= enable_mask;
-	write_pm_ier(dev_priv);
-	gen6_unmask_pm_irq(dev_priv, enable_mask);
+	gt->pm_ier |= enable_mask;
+	write_pm_ier(gt);
+	gen6_unmask_pm_irq(gt, enable_mask);
 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
 }
 
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
 {
-	lockdep_assert_held(&dev_priv->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
-	dev_priv->pm_ier &= ~disable_mask;
-	__gen6_mask_pm_irq(dev_priv, disable_mask);
-	write_pm_ier(dev_priv);
+	gt->pm_ier &= ~disable_mask;
+	__gen6_mask_pm_irq(gt, disable_mask);
+	write_pm_ier(gt);
 	/* though a barrier is missing here, we don't really need one */
 }
 
@@ -528,7 +532,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
 
-	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
 		;
 
 	dev_priv->gt_pm.rps.pm_iir = 0;
@@ -546,6 +550,7 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
+	struct intel_gt *gt = &dev_priv->gt;
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
 	if (READ_ONCE(rps->interrupts_enabled))
@@ -555,12 +560,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	WARN_ON_ONCE(rps->pm_iir);
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
 	else
 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
 
 	rps->interrupts_enabled = true;
-	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -577,10 +582,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
-	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 
 	/* Now that we will not be generating any more work, flush any
 	 * outstanding tasks. As we are called on the RPS idle path,
@@ -612,7 +617,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
 				       dev_priv->pm_guc_events);
 		dev_priv->guc.interrupts.enabled = true;
-		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+		gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -624,10 +629,10 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->guc.interrupts.enabled = false;
 
-	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+	gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 
 	gen9_reset_guc_interrupts(dev_priv);
 }
@@ -635,7 +640,7 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
 {
 	spin_lock_irq(&i915->irq_lock);
-	gen11_reset_one_iir(i915, 0, GEN11_GUC);
+	gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC);
 	spin_unlock_irq(&i915->irq_lock);
 }
 
@@ -646,7 +651,7 @@ void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
 					    GEN11_GUC_INTR_GUC2HOST);
 
-		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
+		WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC));
 		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
 		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
 		dev_priv->guc.interrupts.enabled = true;
@@ -663,7 +668,7 @@ void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 
 	gen11_reset_guc_interrupts(dev_priv);
 }
@@ -924,11 +929,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
 	const struct drm_display_mode *mode = &vblank->hwmode;
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	i915_reg_t high_frame, low_frame;
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 	unsigned long irqflags;
@@ -989,9 +995,10 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }
 
-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
@@ -1107,10 +1114,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	return (position + crtc->scanline_offset) % vtotal;
 }
 
-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-				     bool in_vblank_irq, int *vpos, int *hpos,
-				     ktime_t *stime, ktime_t *etime,
-				     const struct drm_display_mode *mode)
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			      bool in_vblank_irq, int *vpos, int *hpos,
+			      ktime_t *stime, ktime_t *etime,
+			      const struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
@@ -1424,7 +1431,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (rps->interrupts_enabled)
-		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
+		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
@@ -1891,8 +1898,9 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
-static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
 {
+	struct drm_i915_private *i915 = gt->i915;
 	struct intel_rps *rps = &i915->gt_pm.rps;
 	const u32 events = i915->pm_rps_events & pm_iir;
 
@@ -1901,7 +1909,7 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
 	if (unlikely(!events))
 		return;
 
-	gen6_mask_pm_irq(i915, events);
+	gen6_mask_pm_irq(gt, events);
 
 	if (!rps->interrupts_enabled)
 		return;
@@ -1916,7 +1924,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq_lock);
-		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+		gen6_mask_pm_irq(&dev_priv->gt,
+				 pm_iir & dev_priv->pm_rps_events);
 		if (rps->interrupts_enabled) {
 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
 			schedule_work(&rps->work);
@@ -2185,8 +2194,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -2271,8 +2279,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -2691,8 +2698,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
  */
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
 
@@ -3002,7 +3008,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
-	struct drm_i915_private *dev_priv = to_i915(arg);
+	struct drm_i915_private *dev_priv = arg;
 	void __iomem * const regs = dev_priv->uncore.regs;
 	u32 master_ctl;
 	u32 gt_iir[4];
@@ -3034,14 +3040,14 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 }
 
 static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
+gen11_gt_engine_identity(struct intel_gt *gt,
 			 const unsigned int bank, const unsigned int bit)
 {
-	void __iomem * const regs = i915->uncore.regs;
+	void __iomem * const regs = gt->uncore->regs;
 	u32 timeout_ts;
 	u32 ident;
 
-	lockdep_assert_held(&i915->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
@@ -3068,27 +3074,29 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915,
 }
 
 static void
-gen11_other_irq_handler(struct drm_i915_private * const i915,
-			const u8 instance, const u16 iir)
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+			const u16 iir)
 {
+	struct drm_i915_private *i915 = gt->i915;
+
 	if (instance == OTHER_GUC_INSTANCE)
 		return gen11_guc_irq_handler(i915, iir);
 
 	if (instance == OTHER_GTPM_INSTANCE)
-		return gen11_rps_irq_handler(i915, iir);
+		return gen11_rps_irq_handler(gt, iir);
 
 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
 		  instance, iir);
 }
 
 static void
-gen11_engine_irq_handler(struct drm_i915_private * const i915,
-			 const u8 class, const u8 instance, const u16 iir)
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+			 const u8 instance, const u16 iir)
 {
 	struct intel_engine_cs *engine;
 
 	if (instance <= MAX_ENGINE_INSTANCE)
-		engine = i915->engine_class[class][instance];
+		engine = gt->i915->engine_class[class][instance];
 	else
 		engine = NULL;
 
@@ -3100,8 +3108,7 @@ gen11_engine_irq_handler(struct drm_i915_private * const i915,
 }
 
 static void
-gen11_gt_identity_handler(struct drm_i915_private * const i915,
-			  const u32 identity)
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
 {
 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
@@ -3111,31 +3118,30 @@ gen11_gt_identity_handler(struct drm_i915_private * const i915,
 		return;
 
 	if (class <= COPY_ENGINE_CLASS)
-		return gen11_engine_irq_handler(i915, class, instance, intr);
+		return gen11_engine_irq_handler(gt, class, instance, intr);
 
 	if (class == OTHER_CLASS)
-		return gen11_other_irq_handler(i915, instance, intr);
+		return gen11_other_irq_handler(gt, instance, intr);
 
 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
 		  class, instance, intr);
 }
 
 static void
-gen11_gt_bank_handler(struct drm_i915_private * const i915,
-		      const unsigned int bank)
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
 {
-	void __iomem * const regs = i915->uncore.regs;
+	void __iomem * const regs = gt->uncore->regs;
 	unsigned long intr_dw;
 	unsigned int bit;
 
-	lockdep_assert_held(&i915->irq_lock);
+	lockdep_assert_held(&gt->i915->irq_lock);
 
 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
 
 	for_each_set_bit(bit, &intr_dw, 32) {
-		const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
+		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
 
-		gen11_gt_identity_handler(i915, ident);
+		gen11_gt_identity_handler(gt, ident);
 	}
 
 	/* Clear must be after shared has been served for engine */
@@ -3143,25 +3149,25 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
 }
 
 static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
-		     const u32 master_ctl)
+gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
 {
+	struct drm_i915_private *i915 = gt->i915;
 	unsigned int bank;
 
 	spin_lock(&i915->irq_lock);
 
 	for (bank = 0; bank < 2; bank++) {
 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
-			gen11_gt_bank_handler(i915, bank);
+			gen11_gt_bank_handler(gt, bank);
 	}
 
 	spin_unlock(&i915->irq_lock);
 }
 
 static u32
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
 {
-	void __iomem * const regs = dev_priv->uncore.regs;
+	void __iomem * const regs = gt->uncore->regs;
 	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3175,10 +3181,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
+gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
 {
 	if (iir & GEN11_GU_MISC_GSE)
-		intel_opregion_asle_intr(dev_priv);
+		intel_opregion_asle_intr(gt->i915);
 }
 
 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -3201,8 +3207,9 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
-	struct drm_i915_private * const i915 = to_i915(arg);
+	struct drm_i915_private * const i915 = arg;
 	void __iomem * const regs = i915->uncore.regs;
+	struct intel_gt *gt = &i915->gt;
 	u32 master_ctl;
 	u32 gu_misc_iir;
 
@@ -3216,7 +3223,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 	}
 
 	/* Find, clear, then process each source of interrupt. */
-	gen11_gt_irq_handler(i915, master_ctl);
+	gen11_gt_irq_handler(gt, master_ctl);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	if (master_ctl & GEN11_DISPLAY_IRQ) {
@@ -3231,11 +3238,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(&i915->runtime_pm);
 	}
 
-	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 
 	gen11_master_intr_enable(regs);
 
-	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
 
 	return IRQ_HANDLED;
 }
@@ -3243,9 +3250,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i8xx_enable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3255,19 +3263,20 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return 0;
 }
 
-static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i945gm_enable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
 	if (dev_priv->i945gm_vblank.enabled++ == 0)
 		schedule_work(&dev_priv->i945gm_vblank.work);
 
-	return i8xx_enable_vblank(dev, pipe);
+	return i8xx_enable_vblank(crtc);
 }
 
-static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i965_enable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3278,9 +3287,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return 0;
 }
 
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int ilk_enable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3293,14 +3303,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	 * PSR is active as no frames are generated.
 	 */
 	if (HAS_PSR(dev_priv))
-		drm_vblank_restore(dev, pipe);
+		drm_crtc_vblank_restore(crtc);
 
 	return 0;
 }
 
-static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int bdw_enable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3311,7 +3322,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	 * PSR is active as no frames are generated, so check only for PSR.
 	 */
 	if (HAS_PSR(dev_priv))
-		drm_vblank_restore(dev, pipe);
+		drm_crtc_vblank_restore(crtc);
 
 	return 0;
 }
@@ -3319,9 +3330,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i8xx_disable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3329,19 +3341,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i945gm_disable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
-	i8xx_disable_vblank(dev, pipe);
+	i8xx_disable_vblank(crtc);
 
 	if (--dev_priv->i945gm_vblank.enabled == 0)
 		schedule_work(&dev_priv->i945gm_vblank.work);
 }
 
-static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i965_disable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3350,9 +3363,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void ilk_disable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3362,9 +3376,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void bdw_disable_vblank(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3447,10 +3462,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
  *
  * This function needs to be called before interrupts are enabled.
  */
-static void ibx_irq_pre_postinstall(struct drm_device *dev)
+static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	if (HAS_PCH_NOP(dev_priv))
 		return;
 
@@ -3473,12 +3486,12 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	if (IS_CHERRYVIEW(dev_priv))
-		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
 	else
-		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
 
 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
-	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
 	i9xx_pipestat_irq_reset(dev_priv);
 
@@ -3519,18 +3532,17 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
 /* drm_dma.h hooks
 */
-static void ironlake_irq_reset(struct drm_device *dev)
+static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	GEN3_IRQ_RESET(uncore, DE);
 	if (IS_GEN(dev_priv, 7))
-		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
 
 	if (IS_HASWELL(dev_priv)) {
-		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
-		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
 	}
 
 	gen5_gt_irq_reset(dev_priv);
@@ -3538,10 +3550,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
 	ibx_irq_reset(dev_priv);
 }
 
-static void valleyview_irq_reset(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(VLV_MASTER_IER, 0);
 	POSTING_READ(VLV_MASTER_IER);
 
@@ -3563,9 +3573,8 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
 }
 
-static void gen8_irq_reset(struct drm_device *dev)
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	int pipe;
 
@@ -3573,8 +3582,8 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 	gen8_gt_irq_reset(dev_priv);
 
-	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
-	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
 
 	for_each_pipe(dev_priv, pipe)
 		if (intel_display_power_is_enabled(dev_priv,
@@ -3589,39 +3598,40 @@ static void gen8_irq_reset(struct drm_device *dev)
 		ibx_irq_reset(dev_priv);
 }
 
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_reset(struct intel_gt *gt)
 {
+	struct intel_uncore *uncore = gt->uncore;
+
 	/* Disable RCS, BCS, VCS and VECS class engines. */
-	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
-	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);
+	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
 
 	/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
-	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
-	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
-	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
-	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
-	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
+	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
 
-	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
-	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
-	I915_WRITE(GEN11_GUC_SG_INTR_MASK,  ~0);
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
 }
 
-static void gen11_irq_reset(struct drm_device *dev)
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	int pipe;
 
 	gen11_master_intr_disable(dev_priv->uncore.regs);
 
-	gen11_gt_irq_reset(dev_priv);
+	gen11_gt_irq_reset(&dev_priv->gt);
 
-	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
 
-	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
-	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
 
 	for_each_pipe(dev_priv, pipe)
 		if (intel_display_power_is_enabled(dev_priv,
@@ -3680,12 +3690,11 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	/* make sure we're done processing display irqs */
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 }
 
-static void cherryview_irq_reset(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3950,9 +3959,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
 }
 
-static void ibx_irq_postinstall(struct drm_device *dev)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 mask;
 
 	if (HAS_PCH_NOP(dev_priv))
@@ -3975,9 +3983,8 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 		spt_hpd_detection_setup(dev_priv);
 }
 
-static void gen5_gt_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 pm_irqs, gt_irqs;
 
@@ -4006,17 +4013,16 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 		 */
 		if (HAS_ENGINE(dev_priv, VECS0)) {
 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
-			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
 		}
 
-		dev_priv->pm_imr = 0xffffffff;
-		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
+		dev_priv->gt.pm_imr = 0xffffffff;
+		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
 	}
 }
 
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 display_mask, extra_mask;
 
@@ -4043,16 +4049,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	dev_priv->irq_mask = ~display_mask;
 
-	ibx_irq_pre_postinstall(dev);
+	ibx_irq_pre_postinstall(dev_priv);
 
 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
 		      display_mask | extra_mask);
 
-	gen5_gt_irq_postinstall(dev);
+	gen5_gt_irq_postinstall(dev_priv);
 
 	ilk_hpd_detection_setup(dev_priv);
 
-	ibx_irq_postinstall(dev);
+	ibx_irq_postinstall(dev_priv);
 
 	if (IS_IRONLAKE_M(dev_priv)) {
 		/* Enable PCU event interrupts
@@ -4064,8 +4070,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
 		spin_unlock_irq(&dev_priv->irq_lock);
 	}
-
-	return 0;
 }
 
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -4097,11 +4101,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 }
 
 
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	gen5_gt_irq_postinstall(dev);
+	gen5_gt_irq_postinstall(dev_priv);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display_irqs_enabled)
@@ -4110,13 +4112,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
 	POSTING_READ(VLV_MASTER_IER);
-
-	return 0;
 }
 
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_gt *gt = &i915->gt;
+	struct intel_uncore *uncore = gt->uncore;
 
 	/* These are interrupts we'll toggle with the ring mask register */
 	u32 gt_interrupts[] = {
@@ -4136,15 +4137,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
 	};
 
-	dev_priv->pm_ier = 0x0;
-	dev_priv->pm_imr = ~dev_priv->pm_ier;
+	gt->pm_ier = 0x0;
+	gt->pm_imr = ~gt->pm_ier;
 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
 	/*
 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
 	 * is enabled/disabled. Same will be the case for GuC interrupts.
 	 */
-	GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
 }
 
@@ -4218,58 +4219,56 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	}
 }
 
-static int gen8_irq_postinstall(struct drm_device *dev)
+static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	if (HAS_PCH_SPLIT(dev_priv))
-		ibx_irq_pre_postinstall(dev);
+		ibx_irq_pre_postinstall(dev_priv);
 
 	gen8_gt_irq_postinstall(dev_priv);
 	gen8_de_irq_postinstall(dev_priv);
 
 	if (HAS_PCH_SPLIT(dev_priv))
-		ibx_irq_postinstall(dev);
+		ibx_irq_postinstall(dev_priv);
 
 	gen8_master_intr_enable(dev_priv->uncore.regs);
-
-	return 0;
 }
 
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_postinstall(struct intel_gt *gt)
 {
 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+	struct intel_uncore *uncore = gt->uncore;
+	const u32 dmask = irqs << 16 | irqs;
+	const u32 smask = irqs << 16;
 
 	BUILD_BUG_ON(irqs & 0xffff0000);
 
 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
-	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
-	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);
+	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
 
 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
-	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
-	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
-	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
-	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
-	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
+	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
 
 	/*
 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
 	 * is enabled/disabled.
 	 */
-	dev_priv->pm_ier = 0x0;
-	dev_priv->pm_imr = ~dev_priv->pm_ier;
-	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
+	gt->pm_ier = 0x0;
+	gt->pm_imr = ~gt->pm_ier;
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
 
 	/* Same thing for GuC interrupts */
-	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
-	I915_WRITE(GEN11_GUC_SG_INTR_MASK,  ~0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
 }
 
-static void icp_irq_postinstall(struct drm_device *dev)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 mask = SDE_GMBUS_ICP;
 
 	WARN_ON(I915_READ(SDEIER) != 0);
@@ -4282,32 +4281,27 @@ static void icp_irq_postinstall(struct drm_device *dev)
 	icp_hpd_detection_setup(dev_priv);
 }
 
-static int gen11_irq_postinstall(struct drm_device *dev)
+static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
 
 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
-		icp_irq_postinstall(dev);
+		icp_irq_postinstall(dev_priv);
 
-	gen11_gt_irq_postinstall(dev_priv);
+	gen11_gt_irq_postinstall(&dev_priv->gt);
 	gen8_de_irq_postinstall(dev_priv);
 
 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
 
 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
-	gen11_master_intr_enable(dev_priv->uncore.regs);
+	gen11_master_intr_enable(uncore->regs);
 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
-
-	return 0;
 }
 
-static int cherryview_irq_postinstall(struct drm_device *dev)
+static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	gen8_gt_irq_postinstall(dev_priv);
 
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -4317,13 +4311,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
 
 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ(GEN8_MASTER_IRQ);
-
-	return 0;
 }
 
-static void i8xx_irq_reset(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	i9xx_pipestat_irq_reset(dev_priv);
@@ -4331,9 +4322,8 @@ static void i8xx_irq_reset(struct drm_device *dev)
 	GEN2_IRQ_RESET(uncore);
 }
 
-static int i8xx_irq_postinstall(struct drm_device *dev)
+static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u16 enable_mask;
 
@@ -4362,8 +4352,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
 	spin_unlock_irq(&dev_priv->irq_lock);
-
-	return 0;
 }
 
 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
@@ -4444,8 +4432,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
 
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -4488,9 +4475,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void i915_irq_reset(struct drm_device *dev)
+static void i915_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4503,9 +4489,8 @@ static void i915_irq_reset(struct drm_device *dev)
 	GEN3_IRQ_RESET(uncore, GEN2_);
 }
 
-static int i915_irq_postinstall(struct drm_device *dev)
+static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 enable_mask;
 
@@ -4543,14 +4528,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	i915_enable_asle_pipestat(dev_priv);
-
-	return 0;
 }
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -4601,9 +4583,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 	return ret;
 }
 
-static void i965_irq_reset(struct drm_device *dev)
+static void i965_irq_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4614,9 +4595,8 @@ static void i965_irq_reset(struct drm_device *dev)
 	GEN3_IRQ_RESET(uncore, GEN2_);
 }
 
-static int i965_irq_postinstall(struct drm_device *dev)
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	u32 enable_mask;
 	u32 error_mask;
@@ -4666,8 +4646,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	i915_enable_asle_pipestat(dev_priv);
-
-	return 0;
 }
 
 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -4697,8 +4675,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = arg;
 	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -4805,11 +4782,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 8)
 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
-	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
-		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
-	else if (INTEL_GEN(dev_priv) >= 3)
-		dev->driver->get_vblank_counter = i915_get_vblank_counter;
-
 	dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on
@@ -4831,86 +4803,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
 
-	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
-	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		dev->driver->irq_handler = cherryview_irq_handler;
-		dev->driver->irq_preinstall = cherryview_irq_reset;
-		dev->driver->irq_postinstall = cherryview_irq_postinstall;
-		dev->driver->irq_uninstall = cherryview_irq_reset;
-		dev->driver->enable_vblank = i965_enable_vblank;
-		dev->driver->disable_vblank = i965_disable_vblank;
-		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-	} else if (IS_VALLEYVIEW(dev_priv)) {
-		dev->driver->irq_handler = valleyview_irq_handler;
-		dev->driver->irq_preinstall = valleyview_irq_reset;
-		dev->driver->irq_postinstall = valleyview_irq_postinstall;
-		dev->driver->irq_uninstall = valleyview_irq_reset;
-		dev->driver->enable_vblank = i965_enable_vblank;
-		dev->driver->disable_vblank = i965_disable_vblank;
-		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-	} else if (INTEL_GEN(dev_priv) >= 11) {
-		dev->driver->irq_handler = gen11_irq_handler;
-		dev->driver->irq_preinstall = gen11_irq_reset;
-		dev->driver->irq_postinstall = gen11_irq_postinstall;
-		dev->driver->irq_uninstall = gen11_irq_reset;
-		dev->driver->enable_vblank = gen8_enable_vblank;
-		dev->driver->disable_vblank = gen8_disable_vblank;
-		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
-	} else if (INTEL_GEN(dev_priv) >= 8) {
-		dev->driver->irq_handler = gen8_irq_handler;
-		dev->driver->irq_preinstall = gen8_irq_reset;
-		dev->driver->irq_postinstall = gen8_irq_postinstall;
-		dev->driver->irq_uninstall = gen8_irq_reset;
-		dev->driver->enable_vblank = gen8_enable_vblank;
-		dev->driver->disable_vblank = gen8_disable_vblank;
-		if (IS_GEN9_LP(dev_priv))
+	if (HAS_GMCH(dev_priv)) {
+		if (I915_HAS_HOTPLUG(dev_priv))
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else {
+		if (INTEL_GEN(dev_priv) >= 11)
+			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+		else if (IS_GEN9_LP(dev_priv))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
-	} else if (HAS_PCH_SPLIT(dev_priv)) {
-		dev->driver->irq_handler = ironlake_irq_handler;
-		dev->driver->irq_preinstall = ironlake_irq_reset;
-		dev->driver->irq_postinstall = ironlake_irq_postinstall;
-		dev->driver->irq_uninstall = ironlake_irq_reset;
-		dev->driver->enable_vblank = ironlake_enable_vblank;
-		dev->driver->disable_vblank = ironlake_disable_vblank;
-		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
-	} else {
-		if (IS_GEN(dev_priv, 2)) {
-			dev->driver->irq_preinstall = i8xx_irq_reset;
-			dev->driver->irq_postinstall = i8xx_irq_postinstall;
-			dev->driver->irq_handler = i8xx_irq_handler;
-			dev->driver->irq_uninstall = i8xx_irq_reset;
-			dev->driver->enable_vblank = i8xx_enable_vblank;
-			dev->driver->disable_vblank = i8xx_disable_vblank;
-		} else if (IS_I945GM(dev_priv)) {
-			dev->driver->irq_preinstall = i915_irq_reset;
-			dev->driver->irq_postinstall = i915_irq_postinstall;
-			dev->driver->irq_uninstall = i915_irq_reset;
-			dev->driver->irq_handler = i915_irq_handler;
-			dev->driver->enable_vblank = i945gm_enable_vblank;
-			dev->driver->disable_vblank = i945gm_disable_vblank;
-		} else if (IS_GEN(dev_priv, 3)) {
-			dev->driver->irq_preinstall = i915_irq_reset;
-			dev->driver->irq_postinstall = i915_irq_postinstall;
-			dev->driver->irq_uninstall = i915_irq_reset;
-			dev->driver->irq_handler = i915_irq_handler;
-			dev->driver->enable_vblank = i8xx_enable_vblank;
-			dev->driver->disable_vblank = i8xx_disable_vblank;
-		} else {
-			dev->driver->irq_preinstall = i965_irq_reset;
-			dev->driver->irq_postinstall = i965_irq_postinstall;
-			dev->driver->irq_uninstall = i965_irq_reset;
-			dev->driver->irq_handler = i965_irq_handler;
-			dev->driver->enable_vblank = i965_enable_vblank;
-			dev->driver->disable_vblank = i965_disable_vblank;
-		}
-		if (I915_HAS_HOTPLUG(dev_priv))
-			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	}
 }
 
@@ -4931,6 +4835,75 @@ void intel_irq_fini(struct drm_i915_private *i915)
 		kfree(i915->l3_parity.remap_info[i]);
 }
 
+static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
+{
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv))
+			return cherryview_irq_handler;
+		else if (IS_VALLEYVIEW(dev_priv))
+			return valleyview_irq_handler;
+		else if (IS_GEN(dev_priv, 4))
+			return i965_irq_handler;
+		else if (IS_GEN(dev_priv, 3))
+			return i915_irq_handler;
+		else
+			return i8xx_irq_handler;
+	} else {
+		if (INTEL_GEN(dev_priv) >= 11)
+			return gen11_irq_handler;
+		else if (INTEL_GEN(dev_priv) >= 8)
+			return gen8_irq_handler;
+		else
+			return ironlake_irq_handler;
+	}
+}
+
+static void intel_irq_reset(struct drm_i915_private *dev_priv)
+{
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv))
+			cherryview_irq_reset(dev_priv);
+		else if (IS_VALLEYVIEW(dev_priv))
+			valleyview_irq_reset(dev_priv);
+		else if (IS_GEN(dev_priv, 4))
+			i965_irq_reset(dev_priv);
+		else if (IS_GEN(dev_priv, 3))
+			i915_irq_reset(dev_priv);
+		else
+			i8xx_irq_reset(dev_priv);
+	} else {
+		if (INTEL_GEN(dev_priv) >= 11)
+			gen11_irq_reset(dev_priv);
+		else if (INTEL_GEN(dev_priv) >= 8)
+			gen8_irq_reset(dev_priv);
+		else
+			ironlake_irq_reset(dev_priv);
+	}
+}
+
+static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	if (HAS_GMCH(dev_priv)) {
+		if (IS_CHERRYVIEW(dev_priv))
+			cherryview_irq_postinstall(dev_priv);
+		else if (IS_VALLEYVIEW(dev_priv))
+			valleyview_irq_postinstall(dev_priv);
+		else if (IS_GEN(dev_priv, 4))
+			i965_irq_postinstall(dev_priv);
+		else if (IS_GEN(dev_priv, 3))
+			i915_irq_postinstall(dev_priv);
+		else
+			i8xx_irq_postinstall(dev_priv);
+	} else {
+		if (INTEL_GEN(dev_priv) >= 11)
+			gen11_irq_postinstall(dev_priv);
+		else if (INTEL_GEN(dev_priv) >= 8)
+			gen8_irq_postinstall(dev_priv);
+		else
+			ironlake_irq_postinstall(dev_priv);
+	}
+}
+
 /**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
@@ -4944,6 +4917,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
  */
 int intel_irq_install(struct drm_i915_private *dev_priv)
 {
+	int irq = dev_priv->drm.pdev->irq;
+	int ret;
+
 	/*
 	 * We enable some interrupt sources in our postinstall hooks, so mark
 	 * interrupts as enabled _before_ actually enabling them to avoid
@@ -4951,7 +4927,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->runtime_pm.irqs_enabled = true;
 
-	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
+	dev_priv->drm.irq_enabled = true;
+
+	intel_irq_reset(dev_priv);
+
+	ret = request_irq(irq, intel_irq_handler(dev_priv),
+			  IRQF_SHARED, DRIVER_NAME, dev_priv);
+	if (ret < 0) {
+		dev_priv->drm.irq_enabled = false;
+		return ret;
+	}
+
+	intel_irq_postinstall(dev_priv);
+
+	return ret;
 }
 
 /**
@@ -4963,7 +4952,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
  */
 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-	drm_irq_uninstall(&dev_priv->drm);
+	int irq = dev_priv->drm.pdev->irq;
+
+	/*
+	 * FIXME we can get called twice during driver load
+	 * error handling due to intel_modeset_cleanup()
+	 * calling us out of sequence. Would be nice if
+	 * it didn't do that...
+	 */
+	if (!dev_priv->drm.irq_enabled)
+		return;
+
+	dev_priv->drm.irq_enabled = false;
+
+	intel_irq_reset(dev_priv);
+
+	free_irq(irq, dev_priv);
+
 	intel_hpd_cancel_work(dev_priv);
 	dev_priv->runtime_pm.irqs_enabled = false;
 }
@@ -4977,9 +4982,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
-	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
+	intel_irq_reset(dev_priv);
 	dev_priv->runtime_pm.irqs_enabled = false;
-	synchronize_irq(dev_priv->drm.irq);
+	intel_synchronize_irq(dev_priv);
 }
 
 /**
@@ -4992,6 +4997,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
 	dev_priv->runtime_pm.irqs_enabled = true;
-	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
-	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
+	intel_irq_reset(dev_priv);
+	intel_irq_postinstall(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index cb25dd2..d93fa4e 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -77,8 +77,8 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask);
+void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask);
 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -102,6 +102,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
 	return dev_priv->runtime_pm.irqs_enabled;
 }
 
+static inline void intel_synchronize_irq(struct drm_i915_private *i915)
+{
+	synchronize_irq(i915->drm.pdev->irq);
+}
+
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask);
@@ -114,4 +119,23 @@ void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
 void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
 void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
 
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			      bool in_vblank_irq, int *vpos, int *hpos,
+			      ktime_t *stime, ktime_t *etime,
+			      const struct drm_display_mode *mode);
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+
+int i8xx_enable_vblank(struct drm_crtc *crtc);
+int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i965_enable_vblank(struct drm_crtc *crtc);
+int ilk_enable_vblank(struct drm_crtc *crtc);
+int bdw_enable_vblank(struct drm_crtc *crtc);
+void i8xx_disable_vblank(struct drm_crtc *crtc);
+void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i965_disable_vblank(struct drm_crtc *crtc);
+void ilk_disable_vblank(struct drm_crtc *crtc);
+void bdw_disable_vblank(struct drm_crtc *crtc);
+
 #endif /* __I915_IRQ_H__ */
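The vblank hooks declared above now take a struct drm_crtc instead of a (dev, pipe) pair, so they can be plugged into per-CRTC function tables rather than struct drm_driver. A hedged sketch of one possible hookup for a gen8+ platform; the funcs table below is illustrative only, the actual assignment lives in the display code:

#include <drm/drm_crtc.h>

#include "i915_irq.h"

/* Illustrative only: wiring the new per-CRTC hooks into a drm_crtc_funcs table. */
static const struct drm_crtc_funcs example_bdw_crtc_funcs = {
	/* ...the usual .destroy, .set_config, .page_flip, etc. would go here... */
	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};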
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5b07766..296452f 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -169,8 +169,9 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
 	"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
 #endif
 
-i915_param_named(enable_dpcd_backlight, bool, 0600,
-	"Enable support for DPCD backlight control (default:false)");
+i915_param_named(enable_dpcd_backlight, int, 0600,
+i915_param_named(enable_dpcd_backlight, int, 0600,
+	"Enable support for DPCD backlight control "
+	"(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index a4770ce..d29ade3 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
 	param(int, reset, 2) \
 	param(unsigned int, inject_load_failure, 0) \
 	param(int, fastboot, -1) \
+	param(int, enable_dpcd_backlight, 0) \
 	param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
 	/* leave bools at the end to not create holes */ \
 	param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
@@ -76,7 +77,6 @@ struct drm_printer;
 	param(bool, verbose_state_checks, true) \
 	param(bool, nuclear_pageflip, false) \
 	param(bool, enable_dp_mst, true) \
-	param(bool, enable_dpcd_backlight, false) \
 	param(bool, enable_gvt, false)
 
 #define MEMBER(T, member, ...) T member;
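With enable_dpcd_backlight now a tri-state integer, -1 defers to the VBT, 0 keeps the PWM backlight and 1 forces DPCD/AUX control. A minimal sketch of how such a parameter might be resolved, assuming the existing i915_modparams global; the helper name and vbt_prefers_aux flag are illustrative, not from this patch:

#include "i915_params.h"

/* Illustrative only: -1 follows the VBT hint, 0 forces PWM, 1 forces DPCD/AUX. */
static bool example_dpcd_backlight_wanted(bool vbt_prefers_aux)
{
	switch (i915_modparams.enable_dpcd_backlight) {
	case -1:
		return vbt_prefers_aux;
	case 1:
		return true;
	default:
		return false;
	}
}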
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6c9f46f..94b588e 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -761,7 +761,7 @@ static const struct intel_device_info intel_elkhartlake_info = {
 	GEN11_FEATURES,
 	PLATFORM(INTEL_ELKHARTLAKE),
 	.require_force_probe = 1,
-	.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+	.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 	.ppgtt_size = 36,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3d8162d..007826d 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -200,20 +200,20 @@
 #include "gt/intel_lrc_reg.h"
 
 #include "i915_drv.h"
-#include "i915_oa_hsw.h"
-#include "i915_oa_bdw.h"
-#include "i915_oa_chv.h"
-#include "i915_oa_sklgt2.h"
-#include "i915_oa_sklgt3.h"
-#include "i915_oa_sklgt4.h"
-#include "i915_oa_bxt.h"
-#include "i915_oa_kblgt2.h"
-#include "i915_oa_kblgt3.h"
-#include "i915_oa_glk.h"
-#include "i915_oa_cflgt2.h"
-#include "i915_oa_cflgt3.h"
-#include "i915_oa_cnl.h"
-#include "i915_oa_icl.h"
+#include "oa/i915_oa_hsw.h"
+#include "oa/i915_oa_bdw.h"
+#include "oa/i915_oa_chv.h"
+#include "oa/i915_oa_sklgt2.h"
+#include "oa/i915_oa_sklgt3.h"
+#include "oa/i915_oa_sklgt4.h"
+#include "oa/i915_oa_bxt.h"
+#include "oa/i915_oa_kblgt2.h"
+#include "oa/i915_oa_kblgt3.h"
+#include "oa/i915_oa_glk.h"
+#include "oa/i915_oa_cflgt2.h"
+#include "oa/i915_oa_cflgt3.h"
+#include "oa/i915_oa_cnl.h"
+#include "oa/i915_oa_icl.h"
 
 /* HW requires this to be a power of two, between 128k and 16M, though driver
  * is currently generally designed assuming the largest 16M size is used such
@@ -1569,28 +1569,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static void delay_after_mux(void)
 {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-	const struct i915_oa_config *oa_config = stream->oa_config;
-
-	/* PRM:
-	 *
-	 * OA unit is using “crclk” for its functionality. When trunk
-	 * level clock gating takes place, OA clock would be gated,
-	 * unable to count the events from non-render clock domain.
-	 * Render clock gating must be disabled when OA is enabled to
-	 * count the events from non-render domain. Unit level clock
-	 * gating for RCS should also be disabled.
-	 */
-	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
-				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
-	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
-				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
-	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
-
-	/* It apparently takes a fairly long time for a new MUX
+	/*
+	 * It apparently takes a fairly long time for a new MUX
 	 * configuration to be applied after these register writes.
 	 * This delay duration was derived empirically based on the
 	 * render_basic config but hopefully it covers the maximum
@@ -1612,6 +1594,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
 	 * a delay at this location would mitigate any invalid reports.
 	 */
 	usleep_range(15000, 20000);
+}
+
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+{
+	struct drm_i915_private *dev_priv = stream->dev_priv;
+	const struct i915_oa_config *oa_config = stream->oa_config;
+
+	/*
+	 * PRM:
+	 *
+	 * OA unit is using “crclk” for its functionality. When trunk
+	 * level clock gating takes place, OA clock would be gated,
+	 * unable to count the events from non-render clock domain.
+	 * Render clock gating must be disabled when OA is enabled to
+	 * count the events from non-render domain. Unit level clock
+	 * gating for RCS should also be disabled.
+	 */
+	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
+	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+	delay_after_mux();
 
 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
 		       oa_config->b_counter_regs_len);
@@ -1837,6 +1843,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 		return ret;
 
 	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+	delay_after_mux();
 
 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
 		       oa_config->b_counter_regs_len);
@@ -2517,6 +2524,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
 	i915_perf_destroy_locked(stream);
 	mutex_unlock(&dev_priv->perf.lock);
 
+	/* Release the reference the perf stream kept on the driver. */
+	drm_dev_put(&dev_priv->drm);
+
 	return 0;
 }
 
@@ -2652,6 +2662,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
 		i915_perf_enable_locked(stream);
 
+	/* Take a reference on the driver that will be kept with stream_fd
+	 * until its release.
+	 */
+	drm_dev_get(&dev_priv->drm);
+
 	return stream_fd;
 
 err_open:
@@ -3479,9 +3494,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
 			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
 
-			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
-			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
+			if (IS_GEN(dev_priv, 10)) {
+				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+			} else {
+				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
+				dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+			}
 			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8fe46ee..eff8648 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,10 +102,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 	/*
 	 * Also, when software busyness tracking is available we do not
 	 * need the timer for the I915_SAMPLE_BUSY counter.
-	 *
-	 * Use RCS as proxy for all engines.
 	 */
-	else if (intel_engine_supports_stats(i915->engine[RCS0]))
+	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
 		enable &= ~BIT(I915_SAMPLE_BUSY);
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 49709de..b02dea1 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -17,6 +17,16 @@ enum {
 	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
 	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
 
+	/*
+	 * Requests containing performance queries must not be preempted by
+	 * another context. They get scheduled with their default priority and
+	 * once they reach the execlist ports we ensure that they stick on the
+	 * HW until finished by pretending that they have maximum priority,
+	 * i.e. nothing can have higher priority and force us to usurp the
+	 * active request.
+	 */
+	I915_PRIORITY_UNPREEMPTABLE = INT_MAX,
+
 	I915_PRIORITY_INVALID = INT_MIN
 };
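One way to read the new level: once a request carrying a performance query reaches the hardware, the scheduler treats it as if nothing could outrank it. A hypothetical sketch of applying that rule — the helper and its has_perf_query argument are made up for illustration, this is not the driver's actual scheduling code:

#include "i915_priolist_types.h"
#include "i915_request.h"

/* Illustrative only: pin a perf-query request once it is active on the HW. */
static int example_effective_priority(const struct i915_request *rq,
				      bool has_perf_query)
{
	if (has_perf_query)
		return I915_PRIORITY_UNPREEMPTABLE;

	return rq->sched.attr.priority;
}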
 
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 969e514..683e97a 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -24,6 +24,8 @@
 #ifndef _I915_PVINFO_H_
 #define _I915_PVINFO_H_
 
+#include <linux/types.h>
+
 /* The MMIO offset of the shared info between guest and host emulator */
 #define VGT_PVINFO_PAGE	0x78000
 #define VGT_PVINFO_SIZE	0x1000
@@ -110,8 +112,9 @@ struct vgt_if {
 	u32  rsv7[0x200 - 24];    /* pad to one page */
 } __packed;
 
-#define vgtif_reg(x) \
-	_MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+#define vgtif_offset(x) (offsetof(struct vgt_if, x))
+
+#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x))
 
 /* vGPU display status to be used by the host side */
 #define VGT_DRV_DISPLAY_NOT_READY 0
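Splitting vgtif_reg() into vgtif_offset() plus an _MMIO() wrapper means the byte offset into the shared PVINFO page can also be used on its own, e.g. for bounds checks. A small usage sketch, assuming the existing display_ready field of struct vgt_if; the helper itself is illustrative:

#include "i915_pvinfo.h"
#include "intel_uncore.h"

/* Illustrative only: read a PVINFO field and sanity-check its offset. */
static inline u32 example_read_display_ready(struct intel_uncore *uncore)
{
	BUILD_BUG_ON(vgtif_offset(display_ready) >= VGT_PVINFO_SIZE);

	return intel_uncore_read(uncore, vgtif_reg(display_ready));
}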
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d6483b5..5898f59 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1793,8 +1793,10 @@ enum i915_power_well_id {
  */
 #define _ICL_COMBOPHY_A			0x162000
 #define _ICL_COMBOPHY_B			0x6C000
+#define _EHL_COMBOPHY_C			0x160000
 #define _ICL_COMBOPHY(port)		_PICK(port, _ICL_COMBOPHY_A, \
-					      _ICL_COMBOPHY_B)
+					      _ICL_COMBOPHY_B, \
+					      _EHL_COMBOPHY_C)
 
 /* CNL/ICL Port CL_DW registers */
 #define _ICL_PORT_CL_DW(dw, port)	(_ICL_COMBOPHY(port) + \
@@ -1896,6 +1898,8 @@ enum i915_power_well_id {
 #define ICL_PORT_PCS_DW1_GRP(port)	_MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
 #define ICL_PORT_PCS_DW1_LN0(port)	_MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
 #define   COMMON_KEEPER_EN		(1 << 26)
+#define   LATENCY_OPTIM_MASK		(0x3 << 2)
+#define   LATENCY_OPTIM_VAL(x)		((x) << 2)
 
 /* CNL/ICL Port TX registers */
 #define _CNL_PORT_TX_AE_GRP_OFFSET		0x162340
@@ -1991,6 +1995,10 @@ enum i915_power_well_id {
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 
+#define _ICL_DPHY_CHKN_REG			0x194
+#define ICL_DPHY_CHKN(port)			_MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
+#define   ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP	REG_BIT(7)
+
 #define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
 	_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
 
@@ -6284,6 +6292,7 @@ enum {
 #define _DSPATILEOFF				0x701A4 /* 965+ only */
 #define _DSPAOFFSET				0x701A4 /* HSW */
 #define _DSPASURFLIVE				0x701AC
+#define _DSPAGAMC				0x701E0
 
 #define DSPCNTR(plane)		_MMIO_PIPE2(plane, _DSPACNTR)
 #define DSPADDR(plane)		_MMIO_PIPE2(plane, _DSPAADDR)
@@ -6295,6 +6304,7 @@ enum {
 #define DSPLINOFF(plane)	DSPADDR(plane)
 #define DSPOFFSET(plane)	_MMIO_PIPE2(plane, _DSPAOFFSET)
 #define DSPSURFLIVE(plane)	_MMIO_PIPE2(plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i)	_MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
 
 /* CHV pipe B blender and primary plane */
 #define _CHV_BLEND_A		0x60a00
@@ -6397,6 +6407,7 @@ enum {
 #define _DVSAKEYMAXVAL		0x721a0
 #define _DVSATILEOFF		0x721a4
 #define _DVSASURFLIVE		0x721ac
+#define _DVSAGAMC_G4X		0x721e0 /* g4x */
 #define _DVSASCALE		0x72204
 #define   DVS_SCALE_ENABLE	(1 << 31)
 #define   DVS_FILTER_MASK	(3 << 29)
@@ -6405,7 +6416,8 @@ enum {
 #define   DVS_FILTER_SOFTENING	(2 << 29)
 #define   DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
 #define   DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
-#define _DVSAGAMC		0x72300
+#define _DVSAGAMC_ILK		0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK	0x72340 /* ilk/snb */
 
 #define _DVSBCNTR		0x73180
 #define _DVSBLINOFF		0x73184
@@ -6418,8 +6430,10 @@ enum {
 #define _DVSBKEYMAXVAL		0x731a0
 #define _DVSBTILEOFF		0x731a4
 #define _DVSBSURFLIVE		0x731ac
+#define _DVSBGAMC_G4X		0x731e0 /* g4x */
 #define _DVSBSCALE		0x73204
-#define _DVSBGAMC		0x73300
+#define _DVSBGAMC_ILK		0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK	0x73340 /* ilk/snb */
 
 #define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
 #define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
@@ -6433,6 +6447,9 @@ enum {
 #define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
 #define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
 #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
 
 #define _SPRA_CTL		0x70280
 #define   SPRITE_ENABLE			(1 << 31)
@@ -6457,7 +6474,7 @@ enum {
 #define   SPRITE_YUV_ORDER_VYUY		(3 << 16)
 #define   SPRITE_ROTATE_180		(1 << 15)
 #define   SPRITE_TRICKLE_FEED_DISABLE	(1 << 14)
-#define   SPRITE_INT_GAMMA_ENABLE	(1 << 13)
+#define   SPRITE_INT_GAMMA_DISABLE	(1 << 13)
 #define   SPRITE_TILED			(1 << 10)
 #define   SPRITE_DEST_KEY		(1 << 2)
 #define _SPRA_LINOFF		0x70284
@@ -6480,6 +6497,8 @@ enum {
 #define   SPRITE_VERTICAL_OFFSET_HALF	(1 << 28) /* must be enabled below */
 #define   SPRITE_VERTICAL_OFFSET_ENABLE	(1 << 27)
 #define _SPRA_GAMC		0x70400
+#define _SPRA_GAMC16		0x70440
+#define _SPRA_GAMC17		0x7044c
 
 #define _SPRB_CTL		0x71280
 #define _SPRB_LINOFF		0x71284
@@ -6495,6 +6514,8 @@ enum {
 #define _SPRB_SURFLIVE		0x712ac
 #define _SPRB_SCALE		0x71304
 #define _SPRB_GAMC		0x71400
+#define _SPRB_GAMC16		0x71440
+#define _SPRB_GAMC17		0x7144c
 
 #define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
 #define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
@@ -6508,7 +6529,9 @@ enum {
 #define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
 #define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
 #define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
 #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
 #define _SPACNTR		(VLV_DISPLAY_BASE + 0x72180)
@@ -6551,7 +6574,7 @@ enum {
 #define _SPACLRC1		(VLV_DISPLAY_BASE + 0x721d4)
 #define   SP_SH_SIN(x)			(((x) & 0x7ff) << 16) /* s4.7 */
 #define   SP_SH_COS(x)			(x) /* u3.7 */
-#define _SPAGAMC		(VLV_DISPLAY_BASE + 0x721f4)
+#define _SPAGAMC		(VLV_DISPLAY_BASE + 0x721e0)
 
 #define _SPBCNTR		(VLV_DISPLAY_BASE + 0x72280)
 #define _SPBLINOFF		(VLV_DISPLAY_BASE + 0x72284)
@@ -6566,10 +6589,12 @@ enum {
 #define _SPBCONSTALPHA		(VLV_DISPLAY_BASE + 0x722a8)
 #define _SPBCLRC0		(VLV_DISPLAY_BASE + 0x722d0)
 #define _SPBCLRC1		(VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC		(VLV_DISPLAY_BASE + 0x722f4)
+#define _SPBGAMC		(VLV_DISPLAY_BASE + 0x722e0)
 
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+	_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
 #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
-	_MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+	_MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
 
 #define SPCNTR(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
 #define SPLINOFF(pipe, plane_id)	_MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
@@ -6584,7 +6609,7 @@ enum {
 #define SPCONSTALPHA(pipe, plane_id)	_MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
 #define SPCLRC0(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
 #define SPCLRC1(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+#define SPGAMC(pipe, plane_id, i)	_MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
 
 /*
  * CHV pipe B sprite CSC
@@ -11145,6 +11170,7 @@ enum skl_power_gate {
 #define _ICL_PHY_MISC_B		0x64C04
 #define ICL_PHY_MISC(port)	_MMIO_PORT(port, _ICL_PHY_MISC_A, \
 						 _ICL_PHY_MISC_B)
+#define  ICL_PHY_MISC_MUX_DDID			(1 << 28)
 #define  ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN	(1 << 23)
 
 /* Icelake Display Stream Compression Registers */
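Editor's note on the gamma-ramp macros above (illustration only, not part of the patch): with _SPAGAMC moved down to VLV_DISPLAY_BASE + 0x721e0 and the "(5 - (i)) * 4" indexing, entry 0 of the 6-entry ramp lands on the highest dword and entry 5 on the base. Entry 0 resolves to the old single-register offset (…0x721f4), suggesting the previous definition only covered the first ramp entry. A minimal user-space sketch, assuming VLV_DISPLAY_BASE is 0x180000 and using a plain function in place of the _MMIO()/_VLV_SPR() helpers:

#include <stdio.h>

#define VLV_DISPLAY_BASE	0x180000u	/* assumed value, for this sketch only */
#define _SPAGAMC_SKETCH		(VLV_DISPLAY_BASE + 0x721e0)

/* Mirrors the "(5 - (i)) * 4" indexing of SPGAMC(): 6 entries, entry 0 at
 * the highest dword, entry 5 at the base offset. */
static unsigned int spgamc_offset(unsigned int i)
{
	return _SPAGAMC_SKETCH + (5 - i) * 4;
}

int main(void)
{
	for (unsigned int i = 0; i < 6; i++)
		printf("SPGAMC entry %u -> 0x%08x\n", i, spgamc_offset(i));
	return 0;	/* entry 0 -> 0x001f21f4, entry 5 -> 0x001f21e0 */
}
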
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92..222c9c5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -119,6 +119,50 @@ const struct dma_fence_ops i915_fence_ops = {
 	.release = i915_fence_release,
 };
 
+static void irq_execute_cb(struct irq_work *wrk)
+{
+	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+	i915_sw_fence_complete(cb->fence);
+	kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+	cb->hook(container_of(cb->fence, struct i915_request, submit),
+		 &cb->signal->fence);
+	i915_request_put(cb->signal);
+
+	irq_execute_cb(wrk);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
+{
+	struct execute_cb *cb;
+
+	lockdep_assert_held(&rq->lock);
+
+	if (list_empty(&rq->execute_cb))
+		return;
+
+	list_for_each_entry(cb, &rq->execute_cb, link)
+		irq_work_queue(&cb->work);
+
+	/*
+	 * XXX Rollback on __i915_request_unsubmit()
+	 *
+	 * In the future, perhaps when we have an active time-slicing scheduler,
+	 * it will be interesting to unsubmit parallel execution and remove
+	 * busywaits from the GPU until their master is restarted. This is
+	 * quite hairy; we have to carefully roll back the fence and do a
+	 * preempt-to-idle cycle on the target engine, all the while the
+	 * master execute_cb may refire.
+	 */
+	INIT_LIST_HEAD(&rq->execute_cb);
+}
+
 static inline void
 i915_request_remove_from_client(struct i915_request *request)
 {
@@ -232,6 +276,12 @@ static bool i915_request_retire(struct i915_request *rq)
 
 	local_irq_disable();
 
+	/*
+	 * We only loosely track inflight requests across preemption,
+	 * and so we may find ourselves attempting to retire a _completed_
+	 * request that we have removed from the HW and put back on a run
+	 * queue.
+	 */
 	spin_lock(&rq->engine->active.lock);
 	list_del(&rq->sched.link);
 	spin_unlock(&rq->engine->active.lock);
@@ -242,10 +292,15 @@ static bool i915_request_retire(struct i915_request *rq)
 		dma_fence_signal_locked(&rq->fence);
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
 		i915_request_cancel_breadcrumb(rq);
-	if (rq->waitboost) {
+	if (i915_request_has_waitboost(rq)) {
 		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
 		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
 	}
+	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+		__notify_execute_cb(rq);
+	}
+	GEM_BUG_ON(!list_empty(&rq->execute_cb));
 	spin_unlock(&rq->lock);
 
 	local_irq_enable();
@@ -285,50 +340,6 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
-static void irq_execute_cb(struct irq_work *wrk)
-{
-	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
-	i915_sw_fence_complete(cb->fence);
-	kmem_cache_free(global.slab_execute_cbs, cb);
-}
-
-static void irq_execute_cb_hook(struct irq_work *wrk)
-{
-	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
-	cb->hook(container_of(cb->fence, struct i915_request, submit),
-		 &cb->signal->fence);
-	i915_request_put(cb->signal);
-
-	irq_execute_cb(wrk);
-}
-
-static void __notify_execute_cb(struct i915_request *rq)
-{
-	struct execute_cb *cb;
-
-	lockdep_assert_held(&rq->lock);
-
-	if (list_empty(&rq->execute_cb))
-		return;
-
-	list_for_each_entry(cb, &rq->execute_cb, link)
-		irq_work_queue(&cb->work);
-
-	/*
-	 * XXX Rollback on __i915_request_unsubmit()
-	 *
-	 * In the future, perhaps when we have an active time-slicing scheduler,
-	 * it will be interesting to unsubmit parallel execution and remove
-	 * busywaits from the GPU until their master is restarted. This is
-	 * quite hairy, we have to carefully rollback the fence and do a
-	 * preempt-to-idle cycle on the target engine, all the while the
-	 * master execute_cb may refire.
-	 */
-	INIT_LIST_HEAD(&rq->execute_cb);
-}
-
 static int
 __i915_request_await_execution(struct i915_request *rq,
 			       struct i915_request *signal,
@@ -596,7 +607,7 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
-	struct i915_timeline *tl = ce->ring->timeline;
+	struct intel_timeline *tl = ce->ring->timeline;
 	struct i915_request *rq;
 	u32 seqno;
 	int ret;
@@ -645,7 +656,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		}
 	}
 
-	ret = i915_timeline_get_seqno(tl, rq, &seqno);
+	ret = intel_timeline_get_seqno(tl, rq, &seqno);
 	if (ret)
 		goto err_free;
 
@@ -673,7 +684,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->file_priv = NULL;
 	rq->batch = NULL;
 	rq->capture_list = NULL;
-	rq->waitboost = false;
+	rq->flags = 0;
 	rq->execution_mask = ALL_ENGINES;
 
 	INIT_LIST_HEAD(&rq->active_list);
@@ -764,7 +775,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 		return 0;
 
 	signal = list_prev_entry(signal, ring_link);
-	if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+	if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
 		return 0;
 
 	return i915_sw_fence_await_dma_fence(&rq->submit,
@@ -818,7 +829,7 @@ emit_semaphore_wait(struct i915_request *to,
 		return err;
 
 	/* We need to pin the signaler's HWSP until we are finished reading. */
-	err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
 	if (err)
 		return err;
 
@@ -929,7 +940,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 		/* Squash repeated waits to the same timelines */
 		if (fence->context != rq->i915->mm.unordered_timeline &&
-		    i915_timeline_sync_is_later(rq->timeline, fence))
+		    intel_timeline_sync_is_later(rq->timeline, fence))
 			continue;
 
 		if (dma_fence_is_i915(fence))
@@ -943,7 +954,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 		/* Record the latest fence used against each timeline */
 		if (fence->context != rq->i915->mm.unordered_timeline)
-			i915_timeline_sync_set(rq->timeline, fence);
+			intel_timeline_sync_set(rq->timeline, fence);
 	} while (--nchild);
 
 	return 0;
@@ -1081,7 +1092,7 @@ void i915_request_skip(struct i915_request *rq, int error)
 static struct i915_request *
 __i915_request_add_to_timeline(struct i915_request *rq)
 {
-	struct i915_timeline *timeline = rq->timeline;
+	struct intel_timeline *timeline = rq->timeline;
 	struct i915_request *prev;
 
 	/*
@@ -1447,8 +1458,10 @@ long i915_request_wait(struct i915_request *rq,
 	for (;;) {
 		set_current_state(state);
 
-		if (i915_request_completed(rq))
+		if (i915_request_completed(rq)) {
+			dma_fence_signal(&rq->fence);
 			break;
+		}
 
 		if (signal_pending_state(state, current)) {
 			timeout = -ERESTARTSYS;
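Editor's note on the retirement hunk above (a toy model with invented names, not driver code): retirement now flushes any execute callbacks for a request that never reached the ACTIVE state, and does so at most once, so nothing waiting on "execution" is left pending forever:

#include <stdbool.h>
#include <stdio.h>

struct sketch_rq {
	bool active;		/* models I915_FENCE_FLAG_ACTIVE */
	int pending_cbs;	/* models the execute_cb list */
};

/* Fire every registered callback, then leave the list empty. */
static void notify_execute_cbs(struct sketch_rq *rq)
{
	while (rq->pending_cbs > 0) {
		rq->pending_cbs--;
		printf("execute callback fired\n");
	}
}

/* On retire, flush callbacks for a request that never became ACTIVE. */
static void sketch_retire(struct sketch_rq *rq)
{
	if (!rq->active) {
		rq->active = true;	/* mark, so this happens only once */
		notify_execute_cbs(rq);
	}
}

int main(void)
{
	struct sketch_rq rq = { .active = false, .pending_cbs = 2 };

	sketch_retire(&rq);
	return rq.pending_cbs;	/* 0: both callbacks ran exactly once */
}
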
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index edbbdfe..313df3c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,7 @@
 #include <linux/dma-fence.h>
 #include <linux/lockdep.h>
 
+#include "gt/intel_context_types.h"
 #include "gt/intel_engine_types.h"
 
 #include "i915_gem.h"
@@ -40,8 +41,8 @@
 struct drm_file;
 struct drm_i915_gem_object;
 struct i915_request;
-struct i915_timeline;
-struct i915_timeline_cacheline;
+struct intel_timeline;
+struct intel_timeline_cacheline;
 
 struct i915_capture_list {
 	struct i915_capture_list *next;
@@ -112,7 +113,7 @@ struct i915_request {
 	struct intel_engine_cs *engine;
 	struct intel_context *hw_context;
 	struct intel_ring *ring;
-	struct i915_timeline *timeline;
+	struct intel_timeline *timeline;
 	struct list_head signal_link;
 
 	/*
@@ -175,7 +176,7 @@ struct i915_request {
 	 * inside the timeline's HWSP vma, but it is only valid while this
 	 * request has not completed and guarded by the timeline mutex.
 	 */
-	struct i915_timeline_cacheline *hwsp_cacheline;
+	struct intel_timeline_cacheline *hwsp_cacheline;
 
 	/** Position in the ring of the start of the request */
 	u32 head;
@@ -215,7 +216,9 @@ struct i915_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	bool waitboost;
+	unsigned long flags;
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
 
 	/** timeline->request entry for this request */
 	struct list_head link;
@@ -429,6 +432,17 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
 	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
 }
 
+static inline bool i915_request_has_waitboost(const struct i915_request *rq)
+{
+	return rq->flags & I915_REQUEST_WAITBOOST;
+}
+
+static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
+{
+	/* Preemption should only be disabled very rarely */
+	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+}
+
 bool i915_retire_requests(struct drm_i915_private *i915);
 
 #endif /* I915_REQUEST_H */
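Editor's note on the bool-to-flags conversion above (a hedged user-space sketch with invented names): the single waitboost boolean becomes a flags word with one bit per property, queried through small helpers like i915_request_has_waitboost(), which leaves room for more per-request properties such as NOPREEMPT:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))		/* stand-in for the kernel's BIT() */

struct sketch_request {
	unsigned long flags;
#define SKETCH_REQUEST_WAITBOOST BIT(0)
#define SKETCH_REQUEST_NOPREEMPT BIT(1)
};

/* Where "rq->waitboost = true" used to be written, the bit is set instead. */
static void mark_waitboost(struct sketch_request *rq)
{
	rq->flags |= SKETCH_REQUEST_WAITBOOST;
}

static bool has_waitboost(const struct sketch_request *rq)
{
	return rq->flags & SKETCH_REQUEST_WAITBOOST;
}

int main(void)
{
	struct sketch_request rq = { .flags = 0 };

	mark_waitboost(&rq);
	printf("waitboost=%d nopreempt=%d\n",
	       has_waitboost(&rq),
	       !!(rq.flags & SKETCH_REQUEST_NOPREEMPT));
	return 0;
}
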
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2e9b38b..0bd452e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -179,8 +179,7 @@ static inline int rq_prio(const struct i915_request *rq)
 
 static void kick_submission(struct intel_engine_cs *engine, int prio)
 {
-	const struct i915_request *inflight =
-		port_request(engine->execlists.port);
+	const struct i915_request *inflight = *engine->execlists.active;
 
 	/*
 	 * If we are already the currently executing context, don't
@@ -395,6 +394,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 		list_add(&dep->wait_link, &signal->waiters_list);
 		list_add(&dep->signal_link, &node->signalers_list);
 		dep->signaler = signal;
+		dep->waiter = node;
 		dep->flags = flags;
 
 		/* Keep track of whether anyone on this chain has a semaphore */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 3e30963..aad81ac 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -62,6 +62,7 @@ struct i915_sched_node {
 
 struct i915_dependency {
 	struct i915_sched_node *signaler;
+	struct i915_sched_node *waiter;
 	struct list_head signal_link;
 	struct list_head wait_link;
 	struct list_head dfs_link;
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 207e21b..d9b17b9 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -66,12 +66,28 @@ struct i915_subtest {
 	const char *name;
 };
 
+int __i915_nop_setup(void *data);
+int __i915_nop_teardown(int err, void *data);
+
+int __i915_live_setup(void *data);
+int __i915_live_teardown(int err, void *data);
+
 int __i915_subtests(const char *caller,
+		    int (*setup)(void *data),
+		    int (*teardown)(int err, void *data),
 		    const struct i915_subtest *st,
 		    unsigned int count,
 		    void *data);
 #define i915_subtests(T, data) \
-	__i915_subtests(__func__, T, ARRAY_SIZE(T), data)
+	__i915_subtests(__func__, \
+			__i915_nop_setup, __i915_nop_teardown, \
+			T, ARRAY_SIZE(T), data)
+#define i915_live_subtests(T, data) ({ \
+	typecheck(struct drm_i915_private *, data); \
+	__i915_subtests(__func__, \
+			__i915_live_setup, __i915_live_teardown, \
+			T, ARRAY_SIZE(T), data); \
+})
 
 #define SUBTEST(x) { x, #x }
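Editor's note (a hedged sketch, not the driver's implementation): __i915_subtests() now takes per-run setup and teardown hooks, with i915_subtests() passing no-op hooks and i915_live_subtests() passing the live ones plus a typecheck on the data pointer. In outline, a setup/teardown-wrapped runner behaves roughly like this, with all names below being stand-ins:

#include <stdio.h>

struct sketch_subtest {
	int (*func)(void *data);
	const char *name;
};

static int run_subtests(int (*setup)(void *data),
			int (*teardown)(int err, void *data),
			const struct sketch_subtest *st,
			unsigned int count, void *data)
{
	for (unsigned int i = 0; i < count; i++) {
		int err = setup(data);		/* per-subtest setup hook */

		if (err)
			return err;

		printf("running %s\n", st[i].name);
		err = teardown(st[i].func(data), data);	/* teardown sees the subtest's error */
		if (err)
			return err;
	}

	return 0;
}

static int nop_setup(void *data) { (void)data; return 0; }
static int nop_teardown(int err, void *data) { (void)data; return err; }
static int subtest_ok(void *data) { (void)data; return 0; }

int main(void)
{
	const struct sketch_subtest tests[] = { { subtest_ok, "subtest_ok" } };

	return run_subtests(nop_setup, nop_teardown, tests, 1, NULL);
}
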
 
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
deleted file mode 100644
index 36e5e5a..0000000
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef I915_TIMELINE_H
-#define I915_TIMELINE_H
-
-#include <linux/lockdep.h>
-
-#include "i915_active.h"
-#include "i915_syncmap.h"
-#include "i915_timeline_types.h"
-
-int i915_timeline_init(struct drm_i915_private *i915,
-		       struct i915_timeline *tl,
-		       struct i915_vma *hwsp);
-void i915_timeline_fini(struct i915_timeline *tl);
-
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
-		     struct i915_vma *global_hwsp);
-
-static inline struct i915_timeline *
-i915_timeline_get(struct i915_timeline *timeline)
-{
-	kref_get(&timeline->kref);
-	return timeline;
-}
-
-void __i915_timeline_free(struct kref *kref);
-static inline void i915_timeline_put(struct i915_timeline *timeline)
-{
-	kref_put(&timeline->kref, __i915_timeline_free);
-}
-
-static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
-					   u64 context, u32 seqno)
-{
-	return i915_syncmap_set(&tl->sync, context, seqno);
-}
-
-static inline int i915_timeline_sync_set(struct i915_timeline *tl,
-					 const struct dma_fence *fence)
-{
-	return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
-}
-
-static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
-						 u64 context, u32 seqno)
-{
-	return i915_syncmap_is_later(&tl->sync, context, seqno);
-}
-
-static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
-					       const struct dma_fence *fence)
-{
-	return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
-}
-
-int i915_timeline_pin(struct i915_timeline *tl);
-int i915_timeline_get_seqno(struct i915_timeline *tl,
-			    struct i915_request *rq,
-			    u32 *seqno);
-void i915_timeline_unpin(struct i915_timeline *tl);
-
-int i915_timeline_read_hwsp(struct i915_request *from,
-			    struct i915_request *until,
-			    u32 *hwsp_offset);
-
-void i915_timelines_init(struct drm_i915_private *i915);
-void i915_timelines_park(struct drm_i915_private *i915);
-void i915_timelines_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f4ce643..cce426b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,24 +21,22 @@
 /* watermark/fifo updates */
 
 TRACE_EVENT(intel_pipe_enable,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-	    TP_ARGS(dev_priv, pipe),
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
 
 	    TP_STRUCT__entry(
 			     __array(u32, frame, 3)
 			     __array(u32, scanline, 3)
 			     __field(enum pipe, pipe)
 			     ),
-
 	    TP_fast_assign(
-			   enum pipe _pipe;
-			   for_each_pipe(dev_priv, _pipe) {
-				   __entry->frame[_pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
-				   __entry->scanline[_pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+			   struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+			   struct intel_crtc *it__;
+			   for_each_intel_crtc(&dev_priv->drm, it__) {
+				   __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+				   __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
 			   }
-			   __entry->pipe = pipe;
+			   __entry->pipe = crtc->pipe;
 			   ),
 
 	    TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
 );
 
 TRACE_EVENT(intel_pipe_disable,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-	    TP_ARGS(dev_priv, pipe),
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
 
 	    TP_STRUCT__entry(
 			     __array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
 			     ),
 
 	    TP_fast_assign(
-			   enum pipe _pipe;
-			   for_each_pipe(dev_priv, _pipe) {
-				   __entry->frame[_pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
-				   __entry->scanline[_pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+			   struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+			   struct intel_crtc *it__;
+			   for_each_intel_crtc(&dev_priv->drm, it__) {
+				   __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+				   __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
 			   }
-			   __entry->pipe = pipe;
+			   __entry->pipe = crtc->pipe;
 			   ),
 
 	    TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
 
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
 			   ),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
 			     ),
 
 	    TP_fast_assign(
+			    struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 			   __entry->pipe = pipe;
-			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   ),
 
 	    TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
 
 	    TP_fast_assign(
 			   enum pipe pipe = pch_transcoder;
+			   struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 			   __entry->pipe = pipe;
-			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   ),
 
 	    TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
 			     ),
 
 	    TP_fast_assign(
-			   enum pipe pipe;
-			   for_each_pipe(dev_priv, pipe) {
-				   __entry->frame[pipe] =
-					   dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-				   __entry->scanline[pipe] =
-					   intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+			   struct intel_crtc *crtc;
+			   for_each_intel_crtc(&dev_priv->drm, crtc) {
+				   __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+				   __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
 			   }
 			   __entry->old = old;
 			   __entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
 
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
 			   __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
 
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   __entry->level = wm->level;
 			   __entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
 
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   __entry->sprite0_start = sprite0_start;
 			   __entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
 			   __entry->name = plane->name;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
 			   memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
 			   __entry->name = plane->name;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   ),
 
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
 
 	    TP_fast_assign(
 			   __entry->pipe = crtc->pipe;
-			   __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-										       crtc->pipe);
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
 			   __entry->scanline = intel_get_crtc_scanline(crtc);
 			   __entry->min = crtc->debug.min_vbl;
 			   __entry->max = crtc->debug.max_vbl;
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2987219..4920ff9 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -131,6 +131,18 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
 	((typeof(ptr))((unsigned long)(ptr) | __bits));			\
 })
 
+#define ptr_count_dec(p_ptr) do {					\
+	typeof(p_ptr) __p = (p_ptr);					\
+	unsigned long __v = (unsigned long)(*__p);			\
+	*__p = (typeof(*p_ptr))(--__v);					\
+} while (0)
+
+#define ptr_count_inc(p_ptr) do {					\
+	typeof(p_ptr) __p = (p_ptr);					\
+	unsigned long __v = (unsigned long)(*__p);			\
+	*__p = (typeof(*p_ptr))(++__v);					\
+} while (0)
+
 #define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
 #define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
 #define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
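Editor's note: the two ptr_count helpers added above treat the pointer value itself as a counter. A user-space rendition of how they are meant to be used (reproducing the macros from the hunk and relying on the GCC typeof extension, just like the originals):

#include <assert.h>
#include <stdio.h>

#define ptr_count_inc(p_ptr) do {					\
	typeof(p_ptr) __p = (p_ptr);					\
	unsigned long __v = (unsigned long)(*__p);			\
	*__p = (typeof(*p_ptr))(++__v);					\
} while (0)

#define ptr_count_dec(p_ptr) do {					\
	typeof(p_ptr) __p = (p_ptr);					\
	unsigned long __v = (unsigned long)(*__p);			\
	*__p = (typeof(*p_ptr))(--__v);					\
} while (0)

int main(void)
{
	/* A NULL pointer doubles as a zero count; bump it up and down. */
	void *count = NULL;

	ptr_count_inc(&count);
	ptr_count_inc(&count);
	ptr_count_dec(&count);
	assert((unsigned long)count == 1);
	printf("count=%lu\n", (unsigned long)count);
	return 0;
}
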
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992..dbd1fa3 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -52,34 +52,53 @@
  */
 
 /**
- * i915_check_vgpu - detect virtual GPU
+ * i915_detect_vgpu - detect virtual GPU
  * @dev_priv: i915 device private
  *
  * This function is called at the initialization stage, to detect whether
  * running on a vGPU.
  */
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_detect_vgpu(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u64 magic;
 	u16 version_major;
+	void __iomem *shared_area;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
-	if (magic != VGT_MAGIC)
+	/*
+	 * This is called before we set up the main MMIO BAR mappings used via
+	 * the uncore structure, so we need to access the BAR directly. Since
+	 * we do not support VGT on older gens, return early so we don't have
+	 * to consider differently numbered or sized MMIO BARs.
+	 */
+	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
-	if (version_major < VGT_VERSION_MAJOR) {
-		DRM_INFO("VGT interface version mismatch!\n");
+	shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
+	if (!shared_area) {
+		DRM_ERROR("failed to map MMIO bar to check for VGT\n");
 		return;
 	}
 
-	dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+	magic = readq(shared_area + vgtif_offset(magic));
+	if (magic != VGT_MAGIC)
+		goto out;
+
+	version_major = readw(shared_area + vgtif_offset(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
+		DRM_INFO("VGT interface version mismatch!\n");
+		goto out;
+	}
+
+	dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
 
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+
+out:
+	pci_iounmap(pdev, shared_area);
 }
 
 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
@@ -112,22 +131,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 
 /**
  * intel_vgt_deballoon - deballoon reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which we reserved earlier
  *
  * This function is called to deallocate the ballooned-out graphic memory, when
  * driver is unloaded or when ballooning fails.
  */
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
+void intel_vgt_deballoon(struct i915_ggtt *ggtt)
 {
 	int i;
 
-	if (!intel_vgpu_active(dev_priv))
+	if (!intel_vgpu_active(ggtt->vm.i915))
 		return;
 
 	DRM_DEBUG("VGT deballoon.\n");
 
 	for (i = 0; i < 4; i++)
-		vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
+		vgt_deballoon_space(ggtt, &bl_info.space[i]);
 }
 
 static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -153,7 +172,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 
 /**
  * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which to reserve
  *
  * This function is called at the initialization stage, to balloon out the
  * graphic address space allocated to other vGPUs, by marking these spaces as
@@ -195,22 +214,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
  * Returns:
  * zero on success, non-zero if configuration invalid or ballooning failed
  */
-int intel_vgt_balloon(struct drm_i915_private *dev_priv)
+int intel_vgt_balloon(struct i915_ggtt *ggtt)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
 	unsigned long ggtt_end = ggtt->vm.total;
 
 	unsigned long mappable_base, mappable_size, mappable_end;
 	unsigned long unmappable_base, unmappable_size, unmappable_end;
 	int ret;
 
-	if (!intel_vgpu_active(dev_priv))
+	if (!intel_vgpu_active(ggtt->vm.i915))
 		return 0;
 
-	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
-	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
-	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
-	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+	mappable_base =
+	  intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
+	mappable_size =
+	  intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
+	unmappable_base =
+	  intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
+	unmappable_size =
+	  intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));
 
 	mappable_end = mappable_base + mappable_size;
 	unmappable_end = unmappable_base + unmappable_size;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index ebe1b7b..8b3663d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,9 +24,10 @@
 #ifndef _I915_VGPU_H_
 #define _I915_VGPU_H_
 
+#include "i915_drv.h"
 #include "i915_pvinfo.h"
 
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_detect_vgpu(struct drm_i915_private *dev_priv);
 
 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
 
@@ -42,7 +43,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
 	return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
 }
 
-int intel_vgt_balloon(struct drm_i915_private *dev_priv);
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct i915_ggtt *ggtt);
+void intel_vgt_deballoon(struct i915_ggtt *ggtt);
 
 #endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a57729b..ee73baf 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,11 +22,13 @@
  *
  */
 
+#include <linux/sched/mm.h>
 #include <drm/drm_gem.h>
 
 #include "display/intel_frontbuffer.h"
 
 #include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
 
 #include "i915_drv.h"
 #include "i915_globals.h"
@@ -77,43 +79,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 
 #endif
 
-static void obj_bump_mru(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *active_to_vma(struct i915_active *ref)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	unsigned long flags;
+	return container_of(ref, typeof(struct i915_vma), active);
+}
 
-	spin_lock_irqsave(&i915->mm.obj_lock, flags);
-	list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
-	obj->mm.dirty = true; /* be paranoid  */
+static int __i915_vma_active(struct i915_active *ref)
+{
+	i915_vma_get(active_to_vma(ref));
+	return 0;
 }
 
 static void __i915_vma_retire(struct i915_active *ref)
 {
-	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	GEM_BUG_ON(!i915_gem_object_is_active(obj));
-	if (--obj->active_count)
-		return;
-
-	/* Prune the shared fence arrays iff completely idle (inc. external) */
-	if (reservation_object_trylock(obj->base.resv)) {
-		if (reservation_object_test_signaled_rcu(obj->base.resv, true))
-			reservation_object_add_excl_fence(obj->base.resv, NULL);
-		reservation_object_unlock(obj->base.resv);
-	}
-
-	/*
-	 * Bump our place on the bound list to keep it roughly in LRU order
-	 * so that we don't steal from recently used but inactive objects
-	 * (unless we are forced to ofc!)
-	 */
-	if (i915_gem_object_is_shrinkable(obj))
-		obj_bump_mru(obj);
-
-	i915_gem_object_put(obj); /* and drop the active reference */
+	i915_vma_put(active_to_vma(ref));
 }
 
 static struct i915_vma *
@@ -138,9 +117,17 @@ vma_create(struct drm_i915_gem_object *obj,
 	vma->size = obj->base.size;
 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
-	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+	i915_active_init(vm->i915, &vma->active,
+			 __i915_vma_active, __i915_vma_retire);
 	INIT_ACTIVE_REQUEST(&vma->last_fence);
 
+	/* Declare ourselves safe for use inside shrinkers */
+	if (IS_ENABLED(CONFIG_LOCKDEP)) {
+		fs_reclaim_acquire(GFP_KERNEL);
+		might_lock(&vma->active.mutex);
+		fs_reclaim_release(GFP_KERNEL);
+	}
+
 	INIT_LIST_HEAD(&vma->closed_link);
 
 	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
@@ -408,7 +395,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
 	if (!i915_vma_has_ggtt_write(vma))
 		return;
 
-	i915_gem_flush_ggtt_writes(vma->vm->i915);
+	intel_gt_flush_ggtt_writes(vma->vm->gt);
 
 	i915_vma_unset_ggtt_write(vma);
 }
@@ -921,6 +908,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 			    unsigned int flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	int err;
 
 	assert_vma_held(vma);
 	assert_object_held(obj);
@@ -934,17 +922,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!vma->active.count && !obj->active_count++)
-		i915_gem_object_get(obj); /* once more for the active ref */
-
-	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
-		if (!vma->active.count && !--obj->active_count)
-			i915_gem_object_put(obj);
-		return -ENOMEM;
-	}
-
-	GEM_BUG_ON(!i915_vma_is_active(vma));
-	GEM_BUG_ON(!obj->active_count);
+	err = i915_active_ref(&vma->active, rq->fence.context, rq);
+	if (unlikely(err))
+		return err;
 
 	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
@@ -956,11 +936,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 		obj->read_domains = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
+	obj->mm.dirty = true;
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		__i915_active_request_set(&vma->last_fence, rq);
 
 	export_fence(vma, rq, flags);
+
+	GEM_BUG_ON(!i915_vma_is_active(vma));
 	return 0;
 }
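Editor's note on the hunks above (a toy user-space model, invented names): instead of bumping object-level active counts, the vma now supplies i915_active with an acquire hook that takes a vma reference when it first becomes busy and a retire hook that drops it when the last request completes:

#include <assert.h>

struct sketch_vma {
	int refcount;		/* i915_vma_get() / i915_vma_put() */
	int active_count;	/* tracked by the toy "active" below */
};

/* __i915_vma_active(): first user of the tracker takes a vma reference. */
static void sketch_vma_active(struct sketch_vma *vma)
{
	if (vma->active_count++ == 0)
		vma->refcount++;
}

/* __i915_vma_retire(): the last retirement drops that reference again. */
static void sketch_vma_retire(struct sketch_vma *vma)
{
	assert(vma->active_count > 0);
	if (--vma->active_count == 0)
		vma->refcount--;
}

int main(void)
{
	struct sketch_vma vma = { .refcount = 1 };

	sketch_vma_active(&vma);	/* request submitted against the vma */
	assert(vma.refcount == 2);
	sketch_vma_retire(&vma);	/* request retired */
	assert(vma.refcount == 1);
	return 0;
}
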
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7135d8d..e64536e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -929,35 +929,25 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		}
 	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
 		u32 dfsm = I915_READ(SKL_DFSM);
-		u8 disabled_mask = 0;
-		bool invalid;
-		int num_bits;
+		u8 enabled_mask = BIT(info->num_pipes) - 1;
 
 		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
-			disabled_mask |= BIT(PIPE_A);
+			enabled_mask &= ~BIT(PIPE_A);
 		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
-			disabled_mask |= BIT(PIPE_B);
+			enabled_mask &= ~BIT(PIPE_B);
 		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
-			disabled_mask |= BIT(PIPE_C);
+			enabled_mask &= ~BIT(PIPE_C);
 
-		num_bits = hweight8(disabled_mask);
-
-		switch (disabled_mask) {
-		case BIT(PIPE_A):
-		case BIT(PIPE_B):
-		case BIT(PIPE_A) | BIT(PIPE_B):
-		case BIT(PIPE_A) | BIT(PIPE_C):
-			invalid = true;
-			break;
-		default:
-			invalid = false;
-		}
-
-		if (num_bits > info->num_pipes || invalid)
-			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
-				  disabled_mask);
+		/*
+		 * At least one pipe should be enabled and if there are
+		 * disabled pipes, they should be the last ones, with no holes
+		 * in the mask.
+		 */
+		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
+			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
+				  enabled_mask);
 		else
-			info->num_pipes -= num_bits;
+			info->num_pipes = hweight8(enabled_mask);
 	}
 
 	/* Initialize slice/subslice/EU info */
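Editor's note: the rewritten fuse check above boils down to one arithmetic property, shown here outside the driver with an illustrative helper name. Pipes may only be fused off from the top, so a valid enabled mask is a contiguous run of low bits, which is exactly what is_power_of_2(enabled_mask + 1) tests:

#include <stdbool.h>
#include <stdio.h>

/* Valid masks are 0b1, 0b11, 0b111, ... i.e. mask + 1 is a power of two. */
static bool valid_pipe_mask(unsigned int enabled_mask)
{
	return enabled_mask != 0 &&
	       ((enabled_mask + 1) & enabled_mask) == 0;
}

int main(void)
{
	printf("0x7 -> %d\n", valid_pipe_mask(0x7)); /* A+B+C enabled: valid */
	printf("0x3 -> %d\n", valid_pipe_mask(0x3)); /* C fused off: valid */
	printf("0x5 -> %d\n", valid_pipe_mask(0x5)); /* hole at pipe B: invalid */
	printf("0x0 -> %d\n", valid_pipe_mask(0x0)); /* no pipes: invalid */
	return 0;
}
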
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7e..24c63ed 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -115,6 +115,9 @@ struct intel_encoder {
 	int (*compute_config)(struct intel_encoder *,
 			      struct intel_crtc_state *,
 			      struct drm_connector_state *);
+	void (*update_prepare)(struct intel_atomic_state *,
+			       struct intel_encoder *,
+			       struct intel_crtc *);
 	void (*pre_pll_enable)(struct intel_encoder *,
 			       const struct intel_crtc_state *,
 			       const struct drm_connector_state *);
@@ -124,6 +127,9 @@ struct intel_encoder {
 	void (*enable)(struct intel_encoder *,
 		       const struct intel_crtc_state *,
 		       const struct drm_connector_state *);
+	void (*update_complete)(struct intel_atomic_state *,
+				struct intel_encoder *,
+				struct intel_crtc *);
 	void (*disable)(struct intel_encoder *,
 			const struct intel_crtc_state *,
 			const struct drm_connector_state *);
@@ -812,6 +818,15 @@ struct intel_crtc_state {
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
 
+	/*
+	 * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
+	 * setting shared_dpll and dpll_hw_state to one of these reserved ones.
+	 */
+	struct icl_port_dpll {
+		struct intel_shared_dpll *pll;
+		struct intel_dpll_hw_state hw_state;
+	} icl_port_dplls[ICL_PORT_DPLL_COUNT];
+
 	/* DSI PLL registers */
 	struct {
 		u32 ctrl, div;
@@ -1224,8 +1239,12 @@ struct intel_digital_port {
 	/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
 	enum aux_ch aux_ch;
 	enum intel_display_power_domain ddi_io_power_domain;
+	struct mutex tc_lock;	/* protects the TypeC port mode */
+	intel_wakeref_t tc_lock_wakeref;
+	int tc_link_refcount;
 	bool tc_legacy_port:1;
-	enum tc_port_type tc_type;
+	char tc_port_name[8];
+	enum tc_port_mode tc_mode;
 
 	void (*write_infoframe)(struct intel_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 08c906a..d91c966 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -35,6 +35,8 @@
 #include "i915_utils.h"
 #include "i915_vma.h"
 
+struct __guc_ads_blob;
+
 struct guc_preempt_work {
 	struct work_struct work;
 	struct intel_engine_cs *engine;
@@ -65,6 +67,8 @@ struct intel_guc {
 	} interrupts;
 
 	struct i915_vma *ads_vma;
+	struct __guc_ads_blob *ads_blob;
+
 	struct i915_vma *stage_desc_pool;
 	void *stage_desc_pool_vaddr;
 	struct ida stage_ids;
@@ -88,6 +92,9 @@ struct intel_guc {
 		enum forcewake_domains fw_domains;
 	} send_regs;
 
+	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
+	u32 mmio_msg;
+
 	/* To serialize the intel_guc_send actions */
 	struct mutex send_mutex;
 
@@ -181,6 +188,8 @@ static inline bool intel_guc_is_loaded(struct intel_guc *guc)
 static inline int intel_guc_sanitize(struct intel_guc *guc)
 {
 	intel_uc_fw_sanitize(&guc->fw);
+	guc->mmio_msg = 0;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index ecb69fc..69859d1 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -83,18 +83,14 @@ struct __guc_ads_blob {
 	u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
 } __packed;
 
-static int __guc_ads_init(struct intel_guc *guc)
+static void __guc_ads_init(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct __guc_ads_blob *blob;
+	struct __guc_ads_blob *blob = guc->ads_blob;
 	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
 	u32 base;
 	u8 engine_class;
 
-	blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
-	if (IS_ERR(blob))
-		return PTR_ERR(blob);
-
 	/* GuC scheduling policies */
 	guc_policies_init(&blob->policies);
 
@@ -144,9 +140,7 @@ static int __guc_ads_init(struct intel_guc *guc)
 	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
 	blob->ads.clients_info = base + ptr_offset(blob, clients_info);
 
-	i915_gem_object_unpin_map(guc->ads_vma->obj);
-
-	return 0;
+	i915_gem_object_flush_map(guc->ads_vma->obj);
 }
 
 /**
@@ -160,6 +154,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 {
 	const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
 	struct i915_vma *vma;
+	void *blob;
 	int ret;
 
 	GEM_BUG_ON(guc->ads_vma);
@@ -168,11 +163,16 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	guc->ads_vma = vma;
-
-	ret = __guc_ads_init(guc);
-	if (ret)
+	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
 		goto err_vma;
+	}
+
+	guc->ads_vma = vma;
+	guc->ads_blob = blob;
+
+	__guc_ads_init(guc);
 
 	return 0;
 
@@ -183,7 +183,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
 void intel_guc_ads_destroy(struct intel_guc *guc)
 {
-	i915_vma_unpin_and_release(&guc->ads_vma, 0);
+	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 3921809..9e383a4 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -529,8 +529,8 @@ static int ctch_send(struct intel_guc_ct *ct,
 /*
  * Command Transport (CT) buffer based GuC send function.
  */
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
-			     u32 *response_buf, u32 response_buf_size)
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+		      u32 *response_buf, u32 response_buf_size)
 {
 	struct intel_guc_ct *ct = &guc->ct;
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;
@@ -834,7 +834,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
  * When we're communicating with the GuC over CT, GuC uses events
  * to notify us about new messages being posted on the RECV buffer.
  */
-static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
 {
 	struct intel_guc_ct *ct = &guc->ct;
 
@@ -892,20 +892,11 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;
-	int err;
 
 	if (ctch->enabled)
 		return 0;
 
-	err = ctch_enable(guc, ctch);
-	if (unlikely(err))
-		return err;
-
-	/* Switch into cmd transport buffer based send() */
-	guc->send = intel_guc_send_ct;
-	guc->handler = intel_guc_to_host_event_handler_ct;
-	DRM_INFO("CT: %s\n", enableddisabled(true));
-	return 0;
+	return ctch_enable(guc, ctch);
 }
 
 /**
@@ -921,9 +912,4 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 		return;
 
 	ctch_disable(guc, ctch);
-
-	/* Disable send */
-	guc->send = intel_guc_send_nop;
-	guc->handler = intel_guc_to_host_event_handler_nop;
-	DRM_INFO("CT: %s\n", enableddisabled(false));
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index 41ba593..8c1f6d1 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -24,11 +24,14 @@
 #ifndef _INTEL_GUC_CT_H_
 #define _INTEL_GUC_CT_H_
 
-struct intel_guc;
-struct i915_vma;
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 #include "intel_guc_fwif.h"
 
+struct i915_vma;
+struct intel_guc;
+
 /**
  * DOC: Command Transport (CT).
  *
@@ -101,4 +104,8 @@ static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
 	ct->host_channel.enabled = false;
 }
 
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+		      u32 *response_buf, u32 response_buf_size);
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+
 #endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 72cdafd..db1e0dac 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -38,37 +38,37 @@
 	__stringify(KEY##_GUC_FW_PATCH) ".bin"
 
 #define SKL_GUC_FW_PREFIX skl
-#define SKL_GUC_FW_MAJOR 32
+#define SKL_GUC_FW_MAJOR 33
 #define SKL_GUC_FW_MINOR 0
-#define SKL_GUC_FW_PATCH 3
+#define SKL_GUC_FW_PATCH 0
 #define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
 MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
 
 #define BXT_GUC_FW_PREFIX bxt
-#define BXT_GUC_FW_MAJOR 32
+#define BXT_GUC_FW_MAJOR 33
 #define BXT_GUC_FW_MINOR 0
-#define BXT_GUC_FW_PATCH 3
+#define BXT_GUC_FW_PATCH 0
 #define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
 MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
 
 #define KBL_GUC_FW_PREFIX kbl
-#define KBL_GUC_FW_MAJOR 32
+#define KBL_GUC_FW_MAJOR 33
 #define KBL_GUC_FW_MINOR 0
-#define KBL_GUC_FW_PATCH 3
+#define KBL_GUC_FW_PATCH 0
 #define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
 MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
 
 #define GLK_GUC_FW_PREFIX glk
-#define GLK_GUC_FW_MAJOR 32
+#define GLK_GUC_FW_MAJOR 33
 #define GLK_GUC_FW_MINOR 0
-#define GLK_GUC_FW_PATCH 3
+#define GLK_GUC_FW_PATCH 0
 #define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
 MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
 
 #define ICL_GUC_FW_PREFIX icl
-#define ICL_GUC_FW_MAJOR 32
+#define ICL_GUC_FW_MAJOR 33
 #define ICL_GUC_FW_MINOR 0
-#define ICL_GUC_FW_PATCH 3
+#define ICL_GUC_FW_PATCH 0
 #define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
 MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
 
@@ -197,6 +197,7 @@ static inline bool guc_ready(struct intel_guc *guc, u32 *status)
 
 static int guc_wait_ucode(struct intel_guc *guc)
 {
+	struct drm_i915_private *i915 = guc_to_i915(guc);
 	u32 status;
 	int ret;
 
@@ -216,6 +217,12 @@ static int guc_wait_ucode(struct intel_guc *guc)
 		ret = -ENOEXEC;
 	}
 
+	if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
+		DRM_ERROR("GuC firmware exception. EIP: %#x\n",
+			  intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13)));
+		ret = -ENXIO;
+	}
+
 	if (ret == 0 && !guc_xfer_completed(guc, &status)) {
 		DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
 			  status);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index f55f3bc..30cca3a 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,6 +23,10 @@
 #ifndef _INTEL_GUC_FWIF_H
 #define _INTEL_GUC_FWIF_H
 
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
 #define GUC_CLIENT_PRIORITY_KMD_HIGH	0
 #define GUC_CLIENT_PRIORITY_HIGH	1
 #define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
@@ -39,13 +43,8 @@
 #define GUC_VIDEO_ENGINE2		4
 #define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)
 
-/*
- * XXX: Beware that Gen9 firmware 32.x uses wrong definition for
- * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
- * as we are not enabling GuC submission mode where this will be used
- */
 #define GUC_MAX_ENGINE_CLASSES		5
-#define GUC_MAX_INSTANCES_PER_CLASS	4
+#define GUC_MAX_INSTANCES_PER_CLASS	16
 
 #define GUC_DOORBELL_INVALID		256
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index e3b83ec..06c09ac 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -615,7 +615,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
 	struct drm_i915_private *i915 = guc_to_i915(guc);
 
 	guc_log_disable_flush_events(log);
-	synchronize_irq(i915->drm.irq);
+	intel_synchronize_irq(i915);
 
 	flush_work(&log->relay.flush_work);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index a214f8b..a5ab7bc 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -24,6 +24,11 @@
 #ifndef _INTEL_GUC_REG_H_
 #define _INTEL_GUC_REG_H_
 
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
 /* Definitions of GuC H/W registers, bits, etc */
 
 #define GUC_STATUS			_MMIO(0xc000)
@@ -37,6 +42,7 @@
 #define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_DPC_ERROR		  (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_EXCEPTION		  (0x70 << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
 #define   GS_MIA_SHIFT			16
 #define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index db531eb..f104b94 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -32,7 +32,11 @@
 #include "intel_guc_submission.h"
 #include "i915_drv.h"
 
-#define GUC_PREEMPT_FINISHED		0x1
+enum {
+	GUC_PREEMPT_NONE = 0,
+	GUC_PREEMPT_INPROGRESS,
+	GUC_PREEMPT_FINISHED,
+};
 #define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
 #define GUC_PREEMPT_BREADCRUMB_BYTES	\
 	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
@@ -537,15 +541,11 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
-	spin_lock(&client->wq_lock);
-
 	guc_wq_item_append(client, engine->guc_id, ctx_desc,
 			   ring_tail, rq->fence.seqno);
 	guc_ring_doorbell(client);
 
 	client->submissions[engine->id] += 1;
-
-	spin_unlock(&client->wq_lock);
 }
 
 /*
@@ -631,8 +631,9 @@ static void inject_preempt_context(struct work_struct *work)
 	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
 
 	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
-		execlists_clear_active(&engine->execlists,
-				       EXECLISTS_ACTIVE_PREEMPT);
+		intel_write_status_page(engine,
+					I915_GEM_HWS_PREEMPT,
+					GUC_PREEMPT_NONE);
 		tasklet_schedule(&engine->execlists.tasklet);
 	}
 
@@ -672,8 +673,6 @@ static void complete_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
 
-	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
 	if (inject_preempt_hang(execlists))
 		return;
 
@@ -681,89 +680,101 @@ static void complete_preempt_context(struct intel_engine_cs *engine)
 	execlists_unwind_incomplete_requests(execlists);
 
 	wait_for_guc_preempt_report(engine);
-	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
+	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE);
 }
 
-/**
- * guc_submit() - Submit commands through GuC
- * @engine: engine associated with the commands
- *
- * The only error here arises if the doorbell hardware isn't functioning
- * as expected, which really shouln't happen.
- */
-static void guc_submit(struct intel_engine_cs *engine)
+static void guc_submit(struct intel_engine_cs *engine,
+		       struct i915_request **out,
+		       struct i915_request **end)
 {
 	struct intel_guc *guc = &engine->i915->guc;
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	unsigned int n;
+	struct intel_guc_client *client = guc->execbuf_client;
 
-	for (n = 0; n < execlists_num_ports(execlists); n++) {
-		struct i915_request *rq;
-		unsigned int count;
+	spin_lock(&client->wq_lock);
 
-		rq = port_unpack(&port[n], &count);
-		if (rq && count == 0) {
-			port_set(&port[n], port_pack(rq, ++count));
+	do {
+		struct i915_request *rq = *out++;
 
-			flush_ggtt_writes(rq->ring->vma);
+		flush_ggtt_writes(rq->ring->vma);
+		guc_add_request(guc, rq);
+	} while (out != end);
 
-			guc_add_request(guc, rq);
-		}
-	}
-}
-
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
-	GEM_BUG_ON(port_isset(port));
-
-	port_set(port, i915_request_get(rq));
+	spin_unlock(&client->wq_lock);
 }
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.attr.priority;
+	return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static inline int port_prio(const struct execlist_port *port)
+static inline int effective_prio(const struct i915_request *rq)
 {
-	return rq_prio(port_request(port)) | __NO_PREEMPTION;
+	int prio = rq_prio(rq);
+
+	if (i915_request_has_nopreempt(rq))
+		prio = I915_PRIORITY_UNPREEMPTABLE;
+
+	return prio;
 }
 
-static bool __guc_dequeue(struct intel_engine_cs *engine)
+static struct i915_request *schedule_in(struct i915_request *rq, int idx)
+{
+	trace_i915_request_in(rq, idx);
+
+	if (!rq->hw_context->inflight)
+		rq->hw_context->inflight = rq->engine;
+	intel_context_inflight_inc(rq->hw_context);
+
+	return i915_request_get(rq);
+}
+
+static void schedule_out(struct i915_request *rq)
+{
+	trace_i915_request_out(rq);
+
+	intel_context_inflight_dec(rq->hw_context);
+	if (!intel_context_inflight_count(rq->hw_context))
+		rq->hw_context->inflight = NULL;
+
+	i915_request_put(rq);
+}
+
+static void __guc_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	struct i915_request *last = NULL;
-	const struct execlist_port * const last_port =
-		&execlists->port[execlists->port_mask];
+	struct i915_request **first = execlists->inflight;
+	struct i915_request ** const last_port = first + execlists->port_mask;
+	struct i915_request *last = first[0];
+	struct i915_request **port;
 	bool submit = false;
 	struct rb_node *rb;
 
 	lockdep_assert_held(&engine->active.lock);
 
-	if (port_isset(port)) {
+	if (last) {
 		if (intel_engine_has_preemption(engine)) {
 			struct guc_preempt_work *preempt_work =
 				&engine->i915->guc.preempt_work[engine->id];
 			int prio = execlists->queue_priority_hint;
 
 			if (i915_scheduler_need_preempt(prio,
-							port_prio(port))) {
-				execlists_set_active(execlists,
-						     EXECLISTS_ACTIVE_PREEMPT);
+							effective_prio(last))) {
+				intel_write_status_page(engine,
+							I915_GEM_HWS_PREEMPT,
+							GUC_PREEMPT_INPROGRESS);
 				queue_work(engine->i915->guc.preempt_wq,
 					   &preempt_work->work);
-				return false;
+				return;
 			}
 		}
 
-		port++;
-		if (port_isset(port))
-			return false;
-	}
-	GEM_BUG_ON(port_isset(port));
+		if (*++first)
+			return;
 
+		last = NULL;
+	}
+
+	port = first;
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
@@ -774,18 +785,15 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 				if (port == last_port)
 					goto done;
 
-				if (submit)
-					port_assign(port, last);
+				*port = schedule_in(last,
+						    port - execlists->inflight);
 				port++;
 			}
 
 			list_del_init(&rq->sched.link);
-
 			__i915_request_submit(rq);
-			trace_i915_request_in(rq, port_index(port, execlists));
-
-			last = rq;
 			submit = true;
+			last = rq;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -794,58 +802,41 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 done:
 	execlists->queue_priority_hint =
 		rb ? to_priolist(rb)->priority : INT_MIN;
-	if (submit)
-		port_assign(port, last);
-	if (last)
-		execlists_user_begin(execlists, execlists->port);
-
-	/* We must always keep the beast fed if we have work piled up */
-	GEM_BUG_ON(port_isset(execlists->port) &&
-		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
-		   !port_isset(execlists->port));
-
-	return submit;
-}
-
-static void guc_dequeue(struct intel_engine_cs *engine)
-{
-	if (__guc_dequeue(engine))
-		guc_submit(engine);
+	if (submit) {
+		*port = schedule_in(last, port - execlists->inflight);
+		*++port = NULL;
+		guc_submit(engine, first, port);
+	}
+	execlists->active = execlists->inflight;
 }
 
 static void guc_submission_tasklet(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
-	struct i915_request *rq;
+	struct i915_request **port, *rq;
 	unsigned long flags;
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	rq = port_request(port);
-	while (rq && i915_request_completed(rq)) {
-		trace_i915_request_out(rq);
-		i915_request_put(rq);
+	for (port = execlists->inflight; (rq = *port); port++) {
+		if (!i915_request_completed(rq))
+			break;
 
-		port = execlists_port_complete(execlists, port);
-		if (port_isset(port)) {
-			execlists_user_begin(execlists, port);
-			rq = port_request(port);
-		} else {
-			execlists_user_end(execlists);
-			rq = NULL;
-		}
+		schedule_out(rq);
+	}
+	if (port != execlists->inflight) {
+		int idx = port - execlists->inflight;
+		int rem = ARRAY_SIZE(execlists->inflight) - idx;
+		memmove(execlists->inflight, port, rem * sizeof(*port));
 	}
 
-	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
-	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
+	if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
 	    GUC_PREEMPT_FINISHED)
 		complete_preempt_context(engine);
 
-	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
-		guc_dequeue(engine);
+	if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT))
+		__guc_dequeue(engine);
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -959,7 +950,6 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 
 	execlists->queue_priority_hint = INT_MIN;
 	execlists->queue = RB_ROOT_CACHED;
-	GEM_BUG_ON(port_isset(execlists->port));
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -1422,7 +1412,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 	 * and it is guaranteed that it will remove the work item from the
 	 * queue before our request is completed.
 	 */
-	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
+	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
 		     sizeof(struct guc_wq_item) *
 		     I915_NUM_ENGINES > GUC_WQ_SIZE);
 
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 61b2464..85ce37e 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,7 +24,7 @@
 #ifndef _INTEL_GVT_H_
 #define _INTEL_GVT_H_
 
-struct intel_gvt;
+struct drm_i915_private;
 
 #ifdef CONFIG_DRM_I915_GVT
 int intel_gvt_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d9a7a13..0cecea2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1116,6 +1116,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
+	cpp = plane_state->base.fb->format->cpp[0];
+
 	/*
 	 * Not 100% sure which way ELK should go here as the
 	 * spec only says CL/CTG should assume 32bpp and BW
@@ -1129,9 +1131,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	 */
 	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
 	    level != G4X_WM_LEVEL_NORMAL)
-		cpp = 4;
-	else
-		cpp = plane_state->base.fb->format->cpp[0];
+		cpp = max(cpp, 4u);
 
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
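
The GM45 change just above swaps the hard-coded 32bpp assumption for a lower bound: cpp is read
from the framebuffer first and only raised to 4 for the primary plane at the higher watermark
levels. A small standalone sketch of the resulting behaviour, with illustrative names
(gm45_wm_cpp() is not a driver function):

    /* Standalone sketch of the cpp clamp; values are illustrative. */
    #include <stdio.h>

    static unsigned int gm45_wm_cpp(unsigned int fb_cpp, int high_level)
    {
        unsigned int cpp = fb_cpp;

        if (high_level)                      /* mirrors level != G4X_WM_LEVEL_NORMAL */
            cpp = cpp > 4u ? cpp : 4u;       /* max(cpp, 4u) */

        return cpp;
    }

    int main(void)
    {
        /* the old code forced cpp = 4 in all three cases at the high levels */
        printf("%u %u %u\n",
               gm45_wm_cpp(2, 1),   /* 16bpp -> 4 (bumped up)    */
               gm45_wm_cpp(4, 1),   /* 32bpp -> 4 (unchanged)    */
               gm45_wm_cpp(8, 1));  /* 64bpp -> 8 (was 4 before) */
        return 0;
    }
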
@@ -1198,8 +1198,8 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
 	return dirty;
 }
 
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state,
 			      u32 pri_val);
 
 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
@@ -2493,8 +2493,8 @@ struct ilk_wm_maximums {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate,
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state,
 			      u32 mem_value, bool is_lp)
 {
 	u32 method1, method2;
@@ -2503,19 +2503,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 	if (mem_value == 0)
 		return U32_MAX;
 
-	if (!intel_wm_plane_visible(cstate, pstate))
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = pstate->base.fb->format->cpp[0];
+	cpp = plane_state->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
 
 	if (!is_lp)
 		return method1;
 
-	method2 = ilk_wm_method2(cstate->pixel_rate,
-				 cstate->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&pstate->base.dst),
+	method2 = ilk_wm_method2(crtc_state->pixel_rate,
+				 crtc_state->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&plane_state->base.dst),
 				 cpp, mem_value);
 
 	return min(method1, method2);
@@ -2525,8 +2525,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate,
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state,
 			      u32 mem_value)
 {
 	u32 method1, method2;
@@ -2535,15 +2535,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 	if (mem_value == 0)
 		return U32_MAX;
 
-	if (!intel_wm_plane_visible(cstate, pstate))
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = pstate->base.fb->format->cpp[0];
+	cpp = plane_state->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
-	method2 = ilk_wm_method2(cstate->pixel_rate,
-				 cstate->base.adjusted_mode.crtc_htotal,
-				 drm_rect_width(&pstate->base.dst),
+	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+	method2 = ilk_wm_method2(crtc_state->pixel_rate,
+				 crtc_state->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&plane_state->base.dst),
 				 cpp, mem_value);
 	return min(method1, method2);
 }
@@ -2552,8 +2552,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate,
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state,
 			      u32 mem_value)
 {
 	int cpp;
@@ -2561,29 +2561,29 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 	if (mem_value == 0)
 		return U32_MAX;
 
-	if (!intel_wm_plane_visible(cstate, pstate))
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = pstate->base.fb->format->cpp[0];
+	cpp = plane_state->base.fb->format->cpp[0];
 
-	return ilk_wm_method2(cstate->pixel_rate,
-			      cstate->base.adjusted_mode.crtc_htotal,
-			      pstate->base.crtc_w, cpp, mem_value);
+	return ilk_wm_method2(crtc_state->pixel_rate,
+			      crtc_state->base.adjusted_mode.crtc_htotal,
+			      plane_state->base.crtc_w, cpp, mem_value);
 }
 
 /* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state,
 			      u32 pri_val)
 {
 	int cpp;
 
-	if (!intel_wm_plane_visible(cstate, pstate))
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
 		return 0;
 
-	cpp = pstate->base.fb->format->cpp[0];
+	cpp = plane_state->base.fb->format->cpp[0];
 
-	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
 }
 
 static unsigned int
@@ -2752,7 +2752,7 @@ static bool ilk_validate_wm_level(int level,
 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_crtc *intel_crtc,
 				 int level,
-				 struct intel_crtc_state *cstate,
+				 struct intel_crtc_state *crtc_state,
 				 const struct intel_plane_state *pristate,
 				 const struct intel_plane_state *sprstate,
 				 const struct intel_plane_state *curstate,
@@ -2770,30 +2770,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 	}
 
 	if (pristate) {
-		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
 						     pri_latency, level);
-		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
 	}
 
 	if (sprstate)
-		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
 
 	if (curstate)
-		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
 
 	result->enable = true;
 }
 
 static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
 	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(cstate->base.state);
+		to_intel_atomic_state(crtc_state->base.state);
 	const struct drm_display_mode *adjusted_mode =
-		&cstate->base.adjusted_mode;
+		&crtc_state->base.adjusted_mode;
 	u32 linetime, ips_linetime;
 
-	if (!cstate->base.active)
+	if (!crtc_state->base.active)
 		return 0;
 	if (WARN_ON(adjusted_mode->crtc_clock == 0))
 		return 0;
@@ -3101,10 +3101,10 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
 }
 
 /* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3116,9 +3116,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
 	struct ilk_wm_maximums max;
 
-	pipe_wm = &cstate->wm.ilk.optimal;
+	pipe_wm = &crtc_state->wm.ilk.optimal;
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
 		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
 
 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
@@ -3129,7 +3129,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 			curstate = ps;
 	}
 
-	pipe_wm->pipe_enabled = cstate->base.active;
+	pipe_wm->pipe_enabled = crtc_state->base.active;
 	if (sprstate) {
 		pipe_wm->sprites_enabled = sprstate->base.visible;
 		pipe_wm->sprites_scaled = sprstate->base.visible &&
@@ -3148,11 +3148,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 		usable_level = 0;
 
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+		pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
 
 	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
 		return -EINVAL;
@@ -3162,7 +3162,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	for (level = 1; level <= usable_level; level++) {
 		struct intel_wm_level *wm = &pipe_wm->wm[level];
 
-		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+		ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
 				     pristate, sprstate, curstate, wm);
 
 		/*
@@ -3736,14 +3736,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-bool intel_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct intel_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
+	struct drm_device *dev = state->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct intel_crtc *crtc;
 	struct intel_plane *plane;
-	struct intel_crtc_state *cstate;
+	struct intel_crtc_state *crtc_state;
 	enum pipe pipe;
 	int level, latency;
 	int sagv_block_time_us;
@@ -3761,27 +3760,27 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 	/*
 	 * If there are no active CRTCs, no additional checks need be performed
 	 */
-	if (hweight32(intel_state->active_crtcs) == 0)
+	if (hweight32(state->active_crtcs) == 0)
 		return true;
 
 	/*
 	 * SKL+ workaround: bspec recommends we disable SAGV when we have
 	 * more then one pipe enabled
 	 */
-	if (hweight32(intel_state->active_crtcs) > 1)
+	if (hweight32(state->active_crtcs) > 1)
 		return false;
 
 	/* Since we're now guaranteed to only have one active CRTC... */
-	pipe = ffs(intel_state->active_crtcs) - 1;
+	pipe = ffs(state->active_crtcs) - 1;
 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-	cstate = to_intel_crtc_state(crtc->base.state);
+	crtc_state = to_intel_crtc_state(crtc->base.state);
 
 	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
 	for_each_intel_plane_on_crtc(dev, crtc, plane) {
 		struct skl_plane_wm *wm =
-			&cstate->wm.skl.optimal.planes[plane->id];
+			&crtc_state->wm.skl.optimal.planes[plane->id];
 
 		/* Skip this plane if it's not enabled */
 		if (!wm->wm[0].plane_en)
@@ -3812,7 +3811,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 }
 
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
-			      const struct intel_crtc_state *cstate,
+			      const struct intel_crtc_state *crtc_state,
 			      const u64 total_data_rate,
 			      const int num_active,
 			      struct skl_ddb_allocation *ddb)
@@ -3826,7 +3825,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	adjusted_mode = &cstate->base.adjusted_mode;
+	adjusted_mode = &crtc_state->base.adjusted_mode;
 	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
 
 	/*
@@ -3849,23 +3848,22 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
-				   const struct intel_crtc_state *cstate,
+				   const struct intel_crtc_state *crtc_state,
 				   const u64 total_data_rate,
 				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
-	struct drm_atomic_state *state = cstate->base.state;
+	struct drm_atomic_state *state = crtc_state->base.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_crtc *for_crtc = cstate->base.crtc;
-	const struct drm_crtc_state *crtc_state;
-	const struct drm_crtc *crtc;
+	struct drm_crtc *for_crtc = crtc_state->base.crtc;
+	const struct intel_crtc *crtc;
 	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
 	u32 i;
 
-	if (WARN_ON(!state) || !cstate->base.active) {
+	if (WARN_ON(!state) || !crtc_state->base.active) {
 		alloc->start = 0;
 		alloc->end = 0;
 		*num_active = hweight32(dev_priv->active_crtcs);
@@ -3877,7 +3875,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	else
 		*num_active = hweight32(dev_priv->active_crtcs);
 
-	ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
 				      *num_active, ddb);
 
 	/*
@@ -3902,16 +3900,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	 * framebuffer, So instead of allocating DDB equally among pipes
 	 * distribute DDB based on resolution/width of the display.
 	 */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-		const struct drm_display_mode *adjusted_mode;
+	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+		const struct drm_display_mode *adjusted_mode =
+			&crtc_state->base.adjusted_mode;
+		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
-		enum pipe pipe;
 
-		if (!crtc_state->enable)
+		if (!crtc_state->base.enable)
 			continue;
 
-		pipe = to_intel_crtc(crtc)->pipe;
-		adjusted_mode = &crtc_state->adjusted_mode;
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
 		total_width += hdisplay;
 
@@ -3930,7 +3927,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 				 u64 modifier, unsigned int rotation,
 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
 				 int color_plane);
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 				 int level,
 				 const struct skl_wm_params *wp,
 				 const struct skl_wm_level *result_prev,
@@ -4062,15 +4059,15 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
  * Caller should take care of dividing & rounding off the value.
  */
 static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
-			   const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
+			   const struct intel_plane_state *plane_state)
 {
-	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
 	u32 src_w, src_h, dst_w, dst_h;
 	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
 	uint_fixed_16_16_t downscale_h, downscale_w;
 
-	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
 		return u32_to_fixed16(0);
 
 	/* n.b., src is 16.16 fixed point, dst is whole integer */
@@ -4079,20 +4076,20 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
 		 * Cursors only support 0/180 degree rotation,
 		 * hence no need to account for rotation here.
 		 */
-		src_w = pstate->base.src_w >> 16;
-		src_h = pstate->base.src_h >> 16;
-		dst_w = pstate->base.crtc_w;
-		dst_h = pstate->base.crtc_h;
+		src_w = plane_state->base.src_w >> 16;
+		src_h = plane_state->base.src_h >> 16;
+		dst_w = plane_state->base.crtc_w;
+		dst_h = plane_state->base.crtc_h;
 	} else {
 		/*
 		 * Src coordinates are already rotated by 270 degrees for
 		 * the 90/270 degree plane rotation cases (to match the
 		 * GTT mapping), hence no need to account for rotation here.
 		 */
-		src_w = drm_rect_width(&pstate->base.src) >> 16;
-		src_h = drm_rect_height(&pstate->base.src) >> 16;
-		dst_w = drm_rect_width(&pstate->base.dst);
-		dst_h = drm_rect_height(&pstate->base.dst);
+		src_w = drm_rect_width(&plane_state->base.src) >> 16;
+		src_h = drm_rect_height(&plane_state->base.src) >> 16;
+		dst_w = drm_rect_width(&plane_state->base.dst);
+		dst_h = drm_rect_height(&plane_state->base.dst);
 	}
 
 	fp_w_ratio = div_fixed16(src_w, dst_w);
@@ -4137,49 +4134,46 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
 }
 
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
-				  struct intel_crtc_state *cstate)
+				  struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-	struct drm_crtc_state *crtc_state = &cstate->base;
-	struct drm_atomic_state *state = crtc_state->state;
+	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
-	struct intel_plane_state *intel_pstate;
+	const struct drm_plane_state *drm_plane_state;
 	int crtc_clock, dotclk;
 	u32 pipe_max_pixel_rate;
 	uint_fixed_16_16_t pipe_downscale;
 	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
 
-	if (!cstate->base.enable)
+	if (!crtc_state->base.enable)
 		return 0;
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
 		uint_fixed_16_16_t plane_downscale;
 		uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
 		int bpp;
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(drm_plane_state);
 
-		if (!intel_wm_plane_visible(cstate,
-					    to_intel_plane_state(pstate)))
+		if (!intel_wm_plane_visible(crtc_state, plane_state))
 			continue;
 
-		if (WARN_ON(!pstate->fb))
+		if (WARN_ON(!plane_state->base.fb))
 			return -EINVAL;
 
-		intel_pstate = to_intel_plane_state(pstate);
-		plane_downscale = skl_plane_downscale_amount(cstate,
-							     intel_pstate);
-		bpp = pstate->fb->format->cpp[0] * 8;
+		plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
+		bpp = plane_state->base.fb->format->cpp[0] * 8;
 		if (bpp == 64)
 			plane_downscale = mul_fixed16(plane_downscale,
 						      fp_9_div_8);
 
 		max_downscale = max_fixed16(plane_downscale, max_downscale);
 	}
-	pipe_downscale = skl_pipe_downscale_amount(cstate);
+	pipe_downscale = skl_pipe_downscale_amount(crtc_state);
 
 	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
 
-	crtc_clock = crtc_state->adjusted_mode.crtc_clock;
+	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
 	dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
 
 	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
@@ -4196,12 +4190,11 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 }
 
 static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
-			     const struct intel_plane_state *intel_pstate,
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+			     const struct intel_plane_state *plane_state,
 			     const int plane)
 {
-	struct intel_plane *intel_plane =
-		to_intel_plane(intel_pstate->base.plane);
+	struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
 	u32 data_rate;
 	u32 width = 0, height = 0;
 	struct drm_framebuffer *fb;
@@ -4209,10 +4202,10 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	uint_fixed_16_16_t down_scale_amount;
 	u64 rate;
 
-	if (!intel_pstate->base.visible)
+	if (!plane_state->base.visible)
 		return 0;
 
-	fb = intel_pstate->base.fb;
+	fb = plane_state->base.fb;
 	format = fb->format->format;
 
 	if (intel_plane->id == PLANE_CURSOR)
@@ -4225,8 +4218,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	 * the 90/270 degree plane rotation cases (to match the
 	 * GTT mapping), hence no need to account for rotation here.
 	 */
-	width = drm_rect_width(&intel_pstate->base.src) >> 16;
-	height = drm_rect_height(&intel_pstate->base.src) >> 16;
+	width = drm_rect_width(&plane_state->base.src) >> 16;
+	height = drm_rect_height(&plane_state->base.src) >> 16;
 
 	/* UV plane does 1/2 pixel sub-sampling */
 	if (plane == 1 && is_planar_yuv_format(format)) {
@@ -4236,7 +4229,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 
 	data_rate = width * height;
 
-	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
+	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
 
 	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
 
@@ -4245,35 +4238,32 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 }
 
 static u64
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 				 u64 *plane_data_rate,
 				 u64 *uv_plane_data_rate)
 {
-	struct drm_crtc_state *cstate = &intel_cstate->base;
-	struct drm_atomic_state *state = cstate->state;
+	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
+	const struct drm_plane_state *drm_plane_state;
 	u64 total_data_rate = 0;
 
 	if (WARN_ON(!state))
 		return 0;
 
 	/* Calculate and cache data rate for each plane */
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(drm_plane_state);
 		u64 rate;
-		const struct intel_plane_state *intel_pstate =
-			to_intel_plane_state(pstate);
 
 		/* packed/y */
-		rate = skl_plane_relative_data_rate(intel_cstate,
-						    intel_pstate, 0);
+		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
 		plane_data_rate[plane_id] = rate;
 		total_data_rate += rate;
 
 		/* uv-plane */
-		rate = skl_plane_relative_data_rate(intel_cstate,
-						    intel_pstate, 1);
+		rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
 		uv_plane_data_rate[plane_id] = rate;
 		total_data_rate += rate;
 	}
@@ -4282,28 +4272,25 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 }
 
 static u64
-icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 				 u64 *plane_data_rate)
 {
-	struct drm_crtc_state *cstate = &intel_cstate->base;
-	struct drm_atomic_state *state = cstate->state;
 	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
+	const struct drm_plane_state *drm_plane_state;
 	u64 total_data_rate = 0;
 
-	if (WARN_ON(!state))
+	if (WARN_ON(!crtc_state->base.state))
 		return 0;
 
 	/* Calculate and cache data rate for each plane */
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
-		const struct intel_plane_state *intel_pstate =
-			to_intel_plane_state(pstate);
+	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(drm_plane_state);
 		enum plane_id plane_id = to_intel_plane(plane)->id;
 		u64 rate;
 
-		if (!intel_pstate->linked_plane) {
-			rate = skl_plane_relative_data_rate(intel_cstate,
-							    intel_pstate, 0);
+		if (!plane_state->linked_plane) {
+			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
 			plane_data_rate[plane_id] = rate;
 			total_data_rate += rate;
 		} else {
@@ -4316,18 +4303,16 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 			 * NULL if we try get_new_plane_state(), so we
 			 * always calculate from the master.
 			 */
-			if (intel_pstate->slave)
+			if (plane_state->slave)
 				continue;
 
 			/* Y plane rate is calculated on the slave */
-			rate = skl_plane_relative_data_rate(intel_cstate,
-							    intel_pstate, 0);
-			y_plane_id = intel_pstate->linked_plane->id;
+			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+			y_plane_id = plane_state->linked_plane->id;
 			plane_data_rate[y_plane_id] = rate;
 			total_data_rate += rate;
 
-			rate = skl_plane_relative_data_rate(intel_cstate,
-							    intel_pstate, 1);
+			rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
 			plane_data_rate[plane_id] = rate;
 			total_data_rate += rate;
 		}
@@ -4337,14 +4322,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 }
 
 static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		      struct skl_ddb_allocation *ddb /* out */)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_crtc *crtc = cstate->base.crtc;
+	struct drm_atomic_state *state = crtc_state->base.state;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
 	u16 alloc_size, start = 0;
 	u16 total[I915_MAX_PLANES] = {};
 	u16 uv_total[I915_MAX_PLANES] = {};
@@ -4357,40 +4342,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	int level;
 
 	/* Clear the partitioning for disabled planes. */
-	memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
-	memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
+	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
 
 	if (WARN_ON(!state))
 		return 0;
 
-	if (!cstate->base.active) {
+	if (!crtc_state->base.active) {
 		alloc->start = alloc->end = 0;
 		return 0;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		total_data_rate =
-			icl_get_total_relative_data_rate(cstate,
+			icl_get_total_relative_data_rate(crtc_state,
 							 plane_data_rate);
 	else
 		total_data_rate =
-			skl_get_total_relative_data_rate(cstate,
+			skl_get_total_relative_data_rate(crtc_state,
 							 plane_data_rate,
 							 uv_plane_data_rate);
 
 
-	skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
 					   ddb, alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
 
 	/* Allocate fixed number of blocks for cursor. */
-	total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
+	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
 	alloc_size -= total[PLANE_CURSOR];
-	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
 		alloc->end - total[PLANE_CURSOR];
-	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
 
 	if (total_data_rate == 0)
 		return 0;
@@ -4403,7 +4388,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		blocks = 0;
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 			const struct skl_plane_wm *wm =
-				&cstate->wm.skl.optimal.planes[plane_id];
+				&crtc_state->wm.skl.optimal.planes[plane_id];
 
 			if (plane_id == PLANE_CURSOR) {
 				if (WARN_ON(wm->wm[level].min_ddb_alloc >
@@ -4438,7 +4423,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 */
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		const struct skl_plane_wm *wm =
-			&cstate->wm.skl.optimal.planes[plane_id];
+			&crtc_state->wm.skl.optimal.planes[plane_id];
 		u64 rate;
 		u16 extra;
 
@@ -4477,9 +4462,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	start = alloc->start;
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		struct skl_ddb_entry *plane_alloc =
-			&cstate->wm.skl.plane_ddb_y[plane_id];
+			&crtc_state->wm.skl.plane_ddb_y[plane_id];
 		struct skl_ddb_entry *uv_plane_alloc =
-			&cstate->wm.skl.plane_ddb_uv[plane_id];
+			&crtc_state->wm.skl.plane_ddb_uv[plane_id];
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
@@ -4510,7 +4495,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
 		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 			struct skl_plane_wm *wm =
-				&cstate->wm.skl.optimal.planes[plane_id];
+				&crtc_state->wm.skl.optimal.planes[plane_id];
 
 			/*
 			 * We only disable the watermarks for each plane if
@@ -4547,7 +4532,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 */
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		struct skl_plane_wm *wm =
-			&cstate->wm.skl.optimal.planes[plane_id];
+			&crtc_state->wm.skl.optimal.planes[plane_id];
 
 		if (wm->trans_wm.plane_res_b >= total[plane_id])
 			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
@@ -4599,43 +4584,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
 }
 
 static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
 {
 	u32 pixel_rate;
 	u32 crtc_htotal;
 	uint_fixed_16_16_t linetime_us;
 
-	if (!cstate->base.active)
+	if (!crtc_state->base.active)
 		return u32_to_fixed16(0);
 
-	pixel_rate = cstate->pixel_rate;
+	pixel_rate = crtc_state->pixel_rate;
 
 	if (WARN_ON(pixel_rate == 0))
 		return u32_to_fixed16(0);
 
-	crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
+	crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
 
 	return linetime_us;
 }
 
 static u32
-skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
-			      const struct intel_plane_state *pstate)
+skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
 {
 	u64 adjusted_pixel_rate;
 	uint_fixed_16_16_t downscale_amount;
 
 	/* Shouldn't reach here on disabled planes... */
-	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+	if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
 		return 0;
 
 	/*
 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
 	 * with additional adjustments for plane-specific scaling.
 	 */
-	adjusted_pixel_rate = cstate->pixel_rate;
-	downscale_amount = skl_plane_downscale_amount(cstate, pstate);
+	adjusted_pixel_rate = crtc_state->pixel_rate;
+	downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
 
 	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
 					    downscale_amount);
@@ -4768,13 +4753,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
 	return level > 0;
 }
 
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 				 int level,
 				 const struct skl_wm_params *wp,
 				 const struct skl_wm_level *result_prev,
 				 struct skl_wm_level *result /* out */)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	u32 latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
 	uint_fixed_16_16_t selected_result;
@@ -4800,14 +4785,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
 				 wp->cpp, latency, wp->dbuf_block_size);
 	method2 = skl_wm_method2(wp->plane_pixel_rate,
-				 cstate->base.adjusted_mode.crtc_htotal,
+				 crtc_state->base.adjusted_mode.crtc_htotal,
 				 latency,
 				 wp->plane_blocks_per_line);
 
 	if (wp->y_tiled) {
 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
 	} else {
-		if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+		if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
 		     wp->dbuf_block_size < 1) &&
 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
 			selected_result = method2;
@@ -4894,18 +4879,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 }
 
 static void
-skl_compute_wm_levels(const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 		      const struct skl_wm_params *wm_params,
 		      struct skl_wm_level *levels)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level *result_prev = &levels[0];
 
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = &levels[level];
 
-		skl_compute_plane_wm(cstate, level, wm_params,
+		skl_compute_plane_wm(crtc_state, level, wm_params,
 				     result_prev, result);
 
 		result_prev = result;
@@ -4913,14 +4898,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
 }
 
 static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
+	struct drm_atomic_state *state = crtc_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
 	uint_fixed_16_16_t linetime_us;
 	u32 linetime_wm;
 
-	linetime_us = intel_get_linetime_us(cstate);
+	linetime_us = intel_get_linetime_us(crtc_state);
 	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
 
 	/* Display WA #1135: BXT:ALL GLK:ALL */
@@ -4930,11 +4915,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	return linetime_wm;
 }
 
-static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
 				      const struct skl_wm_params *wp,
 				      struct skl_plane_wm *wm)
 {
-	struct drm_device *dev = cstate->base.crtc->dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 trans_min, trans_y_tile_min;
 	const u16 trans_amount = 10; /* This is configurable amount */
@@ -5092,13 +5077,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
+static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	struct drm_crtc_state *crtc_state = &cstate->base;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
 	struct drm_plane *plane;
-	const struct drm_plane_state *pstate;
+	const struct drm_plane_state *drm_plane_state;
 	int ret;
 
 	/*
@@ -5107,19 +5091,20 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
 	 */
 	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
-		const struct intel_plane_state *intel_pstate =
-						to_intel_plane_state(pstate);
+	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
+						   &crtc_state->base) {
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(drm_plane_state);
 
 		if (INTEL_GEN(dev_priv) >= 11)
-			ret = icl_build_plane_wm(cstate, intel_pstate);
+			ret = icl_build_plane_wm(crtc_state, plane_state);
 		else
-			ret = skl_build_plane_wm(cstate, intel_pstate);
+			ret = skl_build_plane_wm(crtc_state, plane_state);
 		if (ret)
 			return ret;
 	}
 
-	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
+	pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
 
 	return 0;
 }
@@ -5273,10 +5258,10 @@ static u32
 pipes_modified(struct intel_atomic_state *state)
 {
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
+	struct intel_crtc_state *crtc_state;
 	u32 i, ret = 0;
 
-	for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
 		ret |= drm_crtc_mask(&crtc->base);
 
 	return ret;
@@ -5652,11 +5637,11 @@ skl_compute_wm(struct intel_atomic_state *state)
 }
 
 static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
-				      struct intel_crtc_state *cstate)
+				      struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+	struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
 	enum pipe pipe = crtc->pipe;
 
 	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
@@ -5666,9 +5651,9 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
-			   struct intel_crtc_state *cstate)
+			   struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_values *results = &state->wm_results;
@@ -5678,8 +5663,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 
-	if (cstate->base.active_changed)
-		skl_atomic_update_crtc_wm(state, cstate);
+	if (crtc_state->base.active_changed)
+		skl_atomic_update_crtc_wm(state, crtc_state);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5735,26 +5720,26 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 }
 
 static void ilk_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc_state *cstate)
+				   struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
-	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
+	intel_crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
 	ilk_program_watermarks(dev_priv);
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc_state *cstate)
+				    struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
-	if (cstate->wm.need_postvbl_update) {
-		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
+	if (crtc_state->wm.need_postvbl_update) {
+		intel_crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
 		ilk_program_watermarks(dev_priv);
 	}
 	mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -5812,13 +5797,13 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
+	struct intel_crtc_state *crtc_state;
 
 	skl_ddb_get_hw_state(dev_priv, ddb);
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		cstate = to_intel_crtc_state(crtc->base.state);
+		crtc_state = to_intel_crtc_state(crtc->base.state);
 
-		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 
 		if (crtc->active)
 			hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
@@ -5835,8 +5820,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
-	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
-	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
+	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
 	enum pipe pipe = crtc->pipe;
 	static const i915_reg_t wm0_pipe_reg[] = {
 		[PIPE_A] = WM0_PIPEA_ILK,
@@ -6891,9 +6876,10 @@ void gen6_rps_boost(struct i915_request *rq)
 	/* Serializes with i915_request_retire() */
 	boost = false;
 	spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+	if (!i915_request_has_waitboost(rq) &&
+	    !dma_fence_is_signaled_locked(&rq->fence)) {
 		boost = !atomic_fetch_inc(&rps->num_waiters);
-		rq->waitboost = true;
+		rq->flags |= I915_REQUEST_WAITBOOST;
 	}
 	spin_unlock_irqrestore(&rq->lock, flags);
 	if (!boost)
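
The gen6_rps_boost() hunk above also folds the old rq->waitboost bool into the request's flags
word, tested through an i915_request_has_waitboost() helper. A tiny standalone sketch of that
pattern, with placeholder names and a placeholder bit value (the real I915_REQUEST_WAITBOOST
definition lives in the request header, not here):

    /* Standalone sketch: a boolean folded into a flags word. */
    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_WAITBOOST (1u << 0)   /* placeholder bit, not the driver value */

    struct request {
        unsigned int flags;
    };

    static bool request_has_waitboost(const struct request *rq)
    {
        return rq->flags & REQ_WAITBOOST;
    }

    int main(void)
    {
        struct request rq = { 0 };

        if (!request_has_waitboost(&rq))
            rq.flags |= REQ_WAITBOOST;   /* first waiter claims the boost */

        printf("boosted: %d\n", request_has_waitboost(&rq));
        return 0;
    }
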
@@ -7192,7 +7178,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
 	 * next request to execute. If the idle hysteresis is less than that
 	 * interrupt service latency, the hardware will automatically gate
 	 * the power well and we will then incur the wake up cost on top of
 	 * the service latency. A similar guide from intel_pstate is that we
 	 * do not want the enable hysteresis to less than the wakeup latency.
 	 *
 	 * igt/gem_exec_nop/sequential provides a rough estimate for the
@@ -7271,7 +7257,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 	 * next request to execute. If the idle hysteresis is less than that
 	 * interrupt service latency, the hardware will automatically gate
 	 * the power well and we will then incur the wake up cost on top of
 	 * the service latency. A similar guide from intel_pstate is that we
 	 * do not want the enable hysteresis to less than the wakeup latency.
 	 *
 	 * igt/gem_exec_nop/sequential provides a rough estimate for the
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 1b489fa..e3573e1 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -10,10 +10,10 @@
 
 #include "i915_reg.h"
 
-struct drm_atomic_state;
 struct drm_device;
 struct drm_i915_private;
 struct i915_request;
+struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
 struct intel_plane;
@@ -52,7 +52,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			      struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
+bool intel_can_enable_sagv(struct intel_atomic_state *state);
 int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 502c544..8d1aebc 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
 static void
 dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
 {
-	struct drm_printer p;
+	if (debug->count) {
+		struct drm_printer p = drm_debug_printer("i915");
 
-	if (!debug->count)
-		return;
-
-	p = drm_debug_printer("i915");
-	__print_intel_runtime_pm_wakeref(&p, debug);
+		__print_intel_runtime_pm_wakeref(&p, debug);
+	}
 
 	kfree(debug->owners);
 }
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index ae45651..fdf00f1 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -218,6 +218,53 @@ static void guc_free_load_err_log(struct intel_guc *guc)
 		i915_gem_object_put(guc->load_err_log);
 }
 
+/*
+ * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
+ * register using the same bits used in the CT message payload. Since our
+ * communication channel with GuC is turned off at this point, we can save the
+ * message and handle it after we turn it back on.
+ */
+static void guc_clear_mmio_msg(struct intel_guc *guc)
+{
+	intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0);
+}
+
+static void guc_get_mmio_msg(struct intel_guc *guc)
+{
+	u32 val;
+
+	spin_lock_irq(&guc->irq_lock);
+
+	val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15));
+	guc->mmio_msg |= val & guc->msg_enabled_mask;
+
+	/*
+	 * clear all events, including the ones we're not currently servicing,
+	 * to make sure we don't try to process a stale message if we enable
+	 * handling of more events later.
+	 */
+	guc_clear_mmio_msg(guc);
+
+	spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_handle_mmio_msg(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+
+	/* we need communication to be enabled to reply to GuC */
+	GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+
+	if (!guc->mmio_msg)
+		return;
+
+	spin_lock_irq(&i915->irq_lock);
+	intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+	spin_unlock_irq(&i915->irq_lock);
+
+	guc->mmio_msg = 0;
+}
+
 static void guc_reset_interrupts(struct intel_guc *guc)
 {
 	guc->interrupts.reset(guc_to_i915(guc));
@@ -235,9 +282,30 @@ static void guc_disable_interrupts(struct intel_guc *guc)
 
 static int guc_enable_communication(struct intel_guc *guc)
 {
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	int ret;
+
+	ret = intel_guc_ct_enable(&guc->ct);
+	if (ret)
+		return ret;
+
+	guc->send = intel_guc_send_ct;
+	guc->handler = intel_guc_to_host_event_handler_ct;
+
+	/* check for mmio messages received before/during the CT enable */
+	guc_get_mmio_msg(guc);
+	guc_handle_mmio_msg(guc);
+
 	guc_enable_interrupts(guc);
 
-	return intel_guc_ct_enable(&guc->ct);
+	/* check for CT messages received before we enabled interrupts */
+	spin_lock_irq(&i915->irq_lock);
+	intel_guc_to_host_event_handler_ct(guc);
+	spin_unlock_irq(&i915->irq_lock);
+
+	DRM_INFO("GuC communication enabled\n");
+
+	return 0;
 }
 
 static void guc_stop_communication(struct intel_guc *guc)
@@ -246,16 +314,35 @@ static void guc_stop_communication(struct intel_guc *guc)
 
 	guc->send = intel_guc_send_nop;
 	guc->handler = intel_guc_to_host_event_handler_nop;
+
+	guc_clear_mmio_msg(guc);
 }
 
 static void guc_disable_communication(struct intel_guc *guc)
 {
-	intel_guc_ct_disable(&guc->ct);
+	/*
+	 * Events generated during or after CT disable are logged by GuC
+	 * via mmio. Make sure the register is clear before disabling CT since
+	 * all events we cared about have already been processed via CT.
+	 */
+	guc_clear_mmio_msg(guc);
 
 	guc_disable_interrupts(guc);
 
 	guc->send = intel_guc_send_nop;
 	guc->handler = intel_guc_to_host_event_handler_nop;
+
+	intel_guc_ct_disable(&guc->ct);
+
+	/*
+	 * Check for messages received during/after the CT disable. We do not
+	 * expect any messages to have arrived via CT between the interrupt
+	 * disable and the CT disable because GuC should've been idle until we
+	 * triggered the CT disable protocol.
+	 */
+	guc_get_mmio_msg(guc);
+
+	DRM_INFO("GuC communication disabled\n");
 }
 
 int intel_uc_init_misc(struct drm_i915_private *i915)
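
The intel_uc.c changes above add a save-and-replay path for GuC events that arrive while the CT
buffers are down: they are latched in a scratch register, snapshotted and cleared when
communication comes back, and handled before interrupts are re-enabled so nothing is lost or
handled twice. The standalone model below mirrors only that ordering; the globals and function
names are illustrative, not the driver's.

    /* Standalone model of the mmio-message save/replay ordering. */
    #include <stdio.h>

    static unsigned int scratch;              /* stands in for SOFT_SCRATCH(15) */
    static unsigned int enabled_mask = 0x3;   /* events we currently service */
    static unsigned int saved_msg;

    static void get_mmio_msg(void)
    {
        saved_msg |= scratch & enabled_mask;
        scratch = 0;                          /* clear even unserviced bits */
    }

    static void handle_mmio_msg(void)
    {
        if (saved_msg)
            printf("handling saved events 0x%x\n", saved_msg);
        saved_msg = 0;
    }

    int main(void)
    {
        scratch = 0x5;      /* events logged while CT was down */

        /* enable path: bring CT up, then replay, then enable interrupts */
        get_mmio_msg();
        handle_mmio_msg();  /* handles 0x1; 0x4 is filtered by the mask */
        return 0;
    }
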
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index ff98f86..24e6646 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -25,6 +25,8 @@
 #ifndef _INTEL_UC_FW_H_
 #define _INTEL_UC_FW_H_
 
+#include <linux/types.h>
+
 struct drm_printer;
 struct drm_i915_private;
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index da33aa6..4015e96 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -78,6 +78,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 static inline void
 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 {
+	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
+	d->uncore->fw_domains_timer |= d->mask;
 	d->wake_count++;
 	hrtimer_start_range_ns(&d->timer,
 			       NSEC_PER_MSEC,
@@ -322,7 +324,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
-	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+	if (IS_VALLEYVIEW(uncore->i915))
 		n = fifo_free_entries(uncore);
 	else
 		n = uncore->fifo_count;
@@ -344,7 +346,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 {
 	struct intel_uncore_forcewake_domain *domain =
 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
-	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
+	struct intel_uncore *uncore = domain->uncore;
 	unsigned long irqflags;
 
 	assert_rpm_device_not_suspended(uncore->rpm);
@@ -353,9 +355,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 		return HRTIMER_RESTART;
 
 	spin_lock_irqsave(&uncore->lock, irqflags);
-	if (WARN_ON(domain->wake_count == 0))
-		domain->wake_count++;
 
+	uncore->fw_domains_timer &= ~domain->mask;
+
+	GEM_BUG_ON(!domain->wake_count);
 	if (--domain->wake_count == 0)
 		uncore->funcs.force_wake_put(uncore, domain->mask);
 
@@ -485,15 +488,13 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
-					  unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+				     unsigned int restore_forcewake)
 {
-	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(uncore))
-		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+	if (IS_CHERRYVIEW(uncore->i915)) {
 		__raw_uncore_write32(uncore, GTFIFOCTL,
 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -515,6 +516,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 
 void intel_uncore_suspend(struct intel_uncore *uncore)
 {
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&uncore->pmic_bus_access_nb);
@@ -526,21 +530,24 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
+	if (intel_uncore_unclaimed_mmio(uncore))
+		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
-	__intel_uncore_early_sanitize(uncore, restore_forcewake);
+	forcewake_early_sanitize(uncore, restore_forcewake);
 
 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
-	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
-}
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
 
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-{
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_sanitize_gt_powersave(dev_priv);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
@@ -628,7 +635,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 	spin_lock_irq(&uncore->lock);
 	if (!--uncore->user_forcewake.count) {
 		if (intel_uncore_unclaimed_mmio(uncore))
-			dev_info(uncore_to_i915(uncore)->drm.dev,
+			dev_info(uncore->i915->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
 		uncore->unclaimed_mmio_check =
@@ -669,8 +676,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
 	fw_domains &= uncore->fw_domains;
 
 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
-		if (WARN_ON(domain->wake_count == 0))
-			continue;
+		GEM_BUG_ON(!domain->wake_count);
 
 		if (--domain->wake_count) {
 			domain->active = true;
@@ -734,15 +740,42 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
 void assert_forcewakes_active(struct intel_uncore *uncore,
 			      enum forcewake_domains fw_domains)
 {
+	struct intel_uncore_forcewake_domain *domain;
+	unsigned int tmp;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+		return;
+
 	if (!uncore->funcs.force_wake_get)
 		return;
 
+	spin_lock_irq(&uncore->lock);
+
 	assert_rpm_wakelock_held(uncore->rpm);
 
 	fw_domains &= uncore->fw_domains;
 	WARN(fw_domains & ~uncore->fw_domains_active,
 	     "Expected %08x fw_domains to be active, but %08x are off\n",
 	     fw_domains, fw_domains & ~uncore->fw_domains_active);
+
+	/*
+	 * Check that the caller has an explicit wakeref and we don't mistake
+	 * it for the auto wakeref.
+	 */
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+		unsigned int actual = READ_ONCE(domain->wake_count);
+		unsigned int expect = 1;
+
+		if (uncore->fw_domains_timer & domain->mask)
+			expect++; /* pending automatic release */
+
+		if (WARN(actual < expect,
+			 "Expected domain %d to be held awake by caller, count=%d\n",
+			 domain->id, actual))
+			break;
+	}
+
+	spin_unlock_irq(&uncore->lock);
 }
 
 /* We give fast paths for the really cool registers */
@@ -901,6 +934,12 @@ static bool is_gen##x##_shadowed(u32 offset) \
 __is_genX_shadowed(8)
 __is_genX_shadowed(11)
 
+static enum forcewake_domains
+gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+	return FORCEWAKE_RENDER;
+}
+
 #define __gen8_reg_write_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd; \
@@ -1123,8 +1162,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
 static inline void __force_wake_auto(struct intel_uncore *uncore,
 				     enum forcewake_domains fw_domains)
 {
-	if (WARN_ON(!fw_domains))
-		return;
+	GEM_BUG_ON(!fw_domains);
 
 	/* Turn on all requested but inactive supported forcewake domains. */
 	fw_domains &= uncore->fw_domains;
@@ -1145,26 +1183,23 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
 	val = __raw_uncore_read##x(uncore, reg); \
 	GEN6_READ_FOOTER; \
 }
-#define __gen6_read(x) __gen_read(gen6, x)
-#define __fwtable_read(x) __gen_read(fwtable, x)
-#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
 
-__gen11_fwtable_read(8)
-__gen11_fwtable_read(16)
-__gen11_fwtable_read(32)
-__gen11_fwtable_read(64)
-__fwtable_read(8)
-__fwtable_read(16)
-__fwtable_read(32)
-__fwtable_read(64)
-__gen6_read(8)
-__gen6_read(16)
-__gen6_read(32)
-__gen6_read(64)
+#define __gen_reg_read_funcs(func) \
+static enum forcewake_domains \
+func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_read(func, 8) \
+__gen_read(func, 16) \
+__gen_read(func, 32) \
+__gen_read(func, 64)
 
-#undef __gen11_fwtable_read
-#undef __fwtable_read
-#undef __gen6_read
+__gen_reg_read_funcs(gen11_fwtable);
+__gen_reg_read_funcs(fwtable);
+__gen_reg_read_funcs(gen6);
+
+#undef __gen_reg_read_funcs
 #undef GEN6_READ_FOOTER
 #undef GEN6_READ_HEADER
 
@@ -1225,6 +1260,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace)
 	__raw_uncore_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
 
 #define __gen_write(func, x) \
 static void \
@@ -1237,38 +1275,33 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac
 	__raw_uncore_write##x(uncore, reg, val); \
 	GEN6_WRITE_FOOTER; \
 }
-#define __gen8_write(x) __gen_write(gen8, x)
-#define __fwtable_write(x) __gen_write(fwtable, x)
-#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
 
-__gen11_fwtable_write(8)
-__gen11_fwtable_write(16)
-__gen11_fwtable_write(32)
-__fwtable_write(8)
-__fwtable_write(16)
-__fwtable_write(32)
-__gen8_write(8)
-__gen8_write(16)
-__gen8_write(32)
-__gen6_write(8)
-__gen6_write(16)
-__gen6_write(32)
+#define __gen_reg_write_funcs(func) \
+static enum forcewake_domains \
+func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_write(func, 8) \
+__gen_write(func, 16) \
+__gen_write(func, 32)
 
-#undef __gen11_fwtable_write
-#undef __fwtable_write
-#undef __gen8_write
-#undef __gen6_write
+__gen_reg_write_funcs(gen11_fwtable);
+__gen_reg_write_funcs(fwtable);
+__gen_reg_write_funcs(gen8);
+
+#undef __gen_reg_write_funcs
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
-#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
 do { \
 	(uncore)->funcs.mmio_writeb = x##_write8; \
 	(uncore)->funcs.mmio_writew = x##_write16; \
 	(uncore)->funcs.mmio_writel = x##_write32; \
 } while (0)
 
-#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
 do { \
 	(uncore)->funcs.mmio_readb = x##_read8; \
 	(uncore)->funcs.mmio_readw = x##_read16; \
@@ -1276,24 +1309,39 @@ do { \
 	(uncore)->funcs.mmio_readq = x##_read64; \
 } while (0)
 
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+do { \
+	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
+	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
+} while (0)
 
-static void fw_domain_init(struct intel_uncore *uncore,
-			   enum forcewake_domain_id domain_id,
-			   i915_reg_t reg_set,
-			   i915_reg_t reg_ack)
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+do { \
+	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
+	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
+} while (0)
+
+static int __fw_domain_init(struct intel_uncore *uncore,
+			    enum forcewake_domain_id domain_id,
+			    i915_reg_t reg_set,
+			    i915_reg_t reg_ack)
 {
 	struct intel_uncore_forcewake_domain *d;
 
-	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
-		return;
+	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
+	GEM_BUG_ON(uncore->fw_domain[domain_id]);
 
-	d = &uncore->fw_domain[domain_id];
+	if (i915_inject_load_failure())
+		return -ENOMEM;
 
-	WARN_ON(d->wake_count);
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
 
 	WARN_ON(!i915_mmio_reg_valid(reg_set));
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
+	d->uncore = uncore;
 	d->wake_count = 0;
 	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
 	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
@@ -1310,7 +1358,6 @@ static void fw_domain_init(struct intel_uncore *uncore,
 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
 
-
 	d->mask = BIT(domain_id);
 
 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1319,6 +1366,10 @@ static void fw_domain_init(struct intel_uncore *uncore,
 	uncore->fw_domains |= BIT(domain_id);
 
 	fw_domain_reset(d);
+
+	uncore->fw_domain[domain_id] = d;
+
+	return 0;
 }
 
 static void fw_domain_fini(struct intel_uncore *uncore,
@@ -1326,30 +1377,41 @@ static void fw_domain_fini(struct intel_uncore *uncore,
 {
 	struct intel_uncore_forcewake_domain *d;
 
-	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
+
+	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
+	if (!d)
 		return;
 
-	d = &uncore->fw_domain[domain_id];
-
-	WARN_ON(d->wake_count);
-	WARN_ON(hrtimer_cancel(&d->timer));
-	memset(d, 0, sizeof(*d));
-
 	uncore->fw_domains &= ~BIT(domain_id);
+	WARN_ON(d->wake_count);
+	WARN_ON(hrtimer_cancel(&d->timer));
+	kfree(d);
 }
 
-static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
+static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct intel_uncore_forcewake_domain *d;
+	int tmp;
 
-	if (!intel_uncore_has_forcewake(uncore))
-		return;
+	for_each_fw_domain(d, uncore, tmp)
+		fw_domain_fini(uncore, d->id);
+}
+
+static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
+	int ret = 0;
+
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
+
+#define fw_domain_init(uncore__, id__, set__, ack__) \
+	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
 
 	if (INTEL_GEN(i915) >= 11) {
 		int i;
 
-		uncore->funcs.force_wake_get =
-			fw_domains_get_with_fallback;
+		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
 		uncore->funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
@@ -1357,6 +1419,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
+
 		for (i = 0; i < I915_MAX_VCS; i++) {
 			if (!HAS_ENGINE(i915, _VCS(i)))
 				continue;
@@ -1374,8 +1437,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
 	} else if (IS_GEN_RANGE(i915, 9, 10)) {
-		uncore->funcs.force_wake_get =
-			fw_domains_get_with_fallback;
+		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
 		uncore->funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
@@ -1424,8 +1486,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		__raw_uncore_write32(uncore, FORCEWAKE, 0);
 		__raw_posting_read(uncore, ECOBUS);
 
-		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
-			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
+				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+		if (ret)
+			goto out;
 
 		spin_lock_irq(&uncore->lock);
 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
@@ -1436,6 +1500,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
 			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
@@ -1447,8 +1512,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 			       FORCEWAKE, FORCEWAKE_ACK);
 	}
 
+#undef fw_domain_init
+
 	/* All future platforms are expected to require complex power gating */
-	WARN_ON(uncore->fw_domains == 0);
+	WARN_ON(!ret && uncore->fw_domains == 0);
+
+out:
+	if (ret)
+		intel_uncore_fw_domains_fini(uncore);
+
+	return ret;
 }
 
 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
@@ -1493,7 +1566,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 
 static int uncore_mmio_setup(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
 	struct pci_dev *pdev = i915->drm.pdev;
 	int mmio_bar;
 	int mmio_size;
@@ -1523,49 +1596,46 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
 
 static void uncore_mmio_cleanup(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-	struct pci_dev *pdev = i915->drm.pdev;
+	struct pci_dev *pdev = uncore->i915->drm.pdev;
 
 	pci_iounmap(pdev, uncore->regs);
 }
 
-void intel_uncore_init_early(struct intel_uncore *uncore)
+void intel_uncore_init_early(struct intel_uncore *uncore,
+			     struct drm_i915_private *i915)
 {
 	spin_lock_init(&uncore->lock);
+	uncore->i915 = i915;
+	uncore->rpm = &i915->runtime_pm;
 }
 
-int intel_uncore_init_mmio(struct intel_uncore *uncore)
+static void uncore_raw_init(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
+
+	if (IS_GEN(uncore->i915, 5)) {
+		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+	} else {
+		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
+		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
+	}
+}
+
+static int uncore_forcewake_init(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
 	int ret;
 
-	ret = uncore_mmio_setup(uncore);
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
+
+	ret = intel_uncore_fw_domains_init(uncore);
 	if (ret)
 		return ret;
 
-	i915_check_vgpu(i915);
+	forcewake_early_sanitize(uncore, 0);
 
-	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
-		uncore->flags |= UNCORE_HAS_FORCEWAKE;
-
-	intel_uncore_fw_domains_init(uncore);
-	__intel_uncore_early_sanitize(uncore, 0);
-
-	uncore->unclaimed_mmio_check = 1;
-	uncore->pmic_bus_access_nb.notifier_call =
-		i915_pmic_bus_access_notifier;
-
-	uncore->rpm = &i915->runtime_pm;
-
-	if (!intel_uncore_has_forcewake(uncore)) {
-		if (IS_GEN(i915, 5)) {
-			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
-			ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
-		} else {
-			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
-			ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
-		}
-	} else if (IS_GEN_RANGE(i915, 6, 7)) {
+	if (IS_GEN_RANGE(i915, 6, 7)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
 
 		if (IS_VALLEYVIEW(i915)) {
@@ -1579,7 +1649,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
 			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
 			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-
 		} else {
 			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
 			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
@@ -1594,6 +1663,40 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
+	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+	return 0;
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
+	int ret;
+
+	ret = uncore_mmio_setup(uncore);
+	if (ret)
+		return ret;
+
+	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+		uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
+	uncore->unclaimed_mmio_check = 1;
+
+	if (!intel_uncore_has_forcewake(uncore)) {
+		uncore_raw_init(uncore);
+	} else {
+		ret = uncore_forcewake_init(uncore);
+		if (ret)
+			goto out_mmio_cleanup;
+	}
+
+	/* Make sure fw funcs are set if and only if we have fw */
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
+
 	if (HAS_FPGA_DBG_UNCLAIMED(i915))
 		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
 
@@ -1603,9 +1706,16 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	if (IS_GEN_RANGE(i915, 6, 7))
 		uncore->flags |= UNCORE_HAS_FIFO;
 
-	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+	/* clear out unclaimed reg detection bit */
+	if (check_for_unclaimed_mmio(uncore))
+		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	return 0;
+
+out_mmio_cleanup:
+	uncore_mmio_cleanup(uncore);
+
+	return ret;
 }
 
 /*
@@ -1615,45 +1725,46 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
  */
 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
+	enum forcewake_domains fw_domains = uncore->fw_domains;
+	enum forcewake_domain_id domain_id;
+	int i;
 
-	if (INTEL_GEN(i915) >= 11) {
-		enum forcewake_domains fw_domains = uncore->fw_domains;
-		enum forcewake_domain_id domain_id;
-		int i;
+	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+		return;
 
-		for (i = 0; i < I915_MAX_VCS; i++) {
-			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+	for (i = 0; i < I915_MAX_VCS; i++) {
+		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-			if (HAS_ENGINE(i915, _VCS(i)))
-				continue;
+		if (HAS_ENGINE(i915, _VCS(i)))
+			continue;
 
-			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(uncore, domain_id);
-		}
+		if (fw_domains & BIT(domain_id))
+			fw_domain_fini(uncore, domain_id);
+	}
 
-		for (i = 0; i < I915_MAX_VECS; i++) {
-			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+	for (i = 0; i < I915_MAX_VECS; i++) {
+		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-			if (HAS_ENGINE(i915, _VECS(i)))
-				continue;
+		if (HAS_ENGINE(i915, _VECS(i)))
+			continue;
 
-			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(uncore, domain_id);
-		}
+		if (fw_domains & BIT(domain_id))
+			fw_domain_fini(uncore, domain_id);
 	}
 }
 
 void intel_uncore_fini_mmio(struct intel_uncore *uncore)
 {
-	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_uncore_sanitize(uncore_to_i915(uncore));
+	if (intel_uncore_has_forcewake(uncore)) {
+		iosf_mbi_punit_acquire();
+		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+			&uncore->pmic_bus_access_nb);
+		intel_uncore_forcewake_reset(uncore);
+		intel_uncore_fw_domains_fini(uncore);
+		iosf_mbi_punit_release();
+	}
 
-	iosf_mbi_punit_acquire();
-	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&uncore->pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(uncore);
-	iosf_mbi_punit_release();
 	uncore_mmio_cleanup(uncore);
 }
 
@@ -1871,62 +1982,6 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 	return ret;
 }
 
-static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
-				i915_reg_t reg)
-{
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-	u32 offset = i915_mmio_reg_offset(reg);
-	enum forcewake_domains fw_domains;
-
-	if (INTEL_GEN(i915) >= 11) {
-		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(i915)) {
-		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
-	} else if (INTEL_GEN(i915) >= 6) {
-		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
-	} else {
-		/* on devices with FW we expect to hit one of the above cases */
-		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(i915));
-
-		fw_domains = 0;
-	}
-
-	WARN_ON(fw_domains & ~uncore->fw_domains);
-
-	return fw_domains;
-}
-
-static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
-				 i915_reg_t reg)
-{
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-	u32 offset = i915_mmio_reg_offset(reg);
-	enum forcewake_domains fw_domains;
-
-	if (INTEL_GEN(i915) >= 11) {
-		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
-		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN(i915, 8)) {
-		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
-	} else if (IS_GEN_RANGE(i915, 6, 7)) {
-		fw_domains = FORCEWAKE_RENDER;
-	} else {
-		/* on devices with FW we expect to hit one of the above cases */
-		if (intel_uncore_has_forcewake(uncore))
-			MISSING_CASE(INTEL_GEN(i915));
-
-		fw_domains = 0;
-	}
-
-	WARN_ON(fw_domains & ~uncore->fw_domains);
-
-	return fw_domains;
-}
-
 /**
  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
  * 				    a register
@@ -1953,10 +2008,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 		return 0;
 
 	if (op & FW_REG_READ)
-		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
+		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
 
 	if (op & FW_REG_WRITE)
-		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
+		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
+
+	WARN_ON(fw_domains & ~uncore->fw_domains);
 
 	return fw_domains;
 }
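A minimal sketch of how a caller uses intel_uncore_forcewake_for_reg() now that the per-platform lookup lives behind the read_fw_domains/write_fw_domains vfuncs (illustrative only; the __locked forcewake helpers and intel_uncore_read_fw() are assumed from the existing driver, and read_with_explicit_forcewake() is hypothetical):

	static u32 read_with_explicit_forcewake(struct intel_uncore *uncore,
						i915_reg_t reg)
	{
		enum forcewake_domains fw;
		u32 val;

		/* Resolve which domains this register needs on this platform. */
		fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

		spin_lock_irq(&uncore->lock);
		intel_uncore_forcewake_get__locked(uncore, fw);

		/* Raw access: forcewake is already held explicitly. */
		val = intel_uncore_read_fw(uncore, reg);

		intel_uncore_forcewake_put__locked(uncore, fw);
		spin_unlock_irq(&uncore->lock);

		return val;
	}
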
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 804a0fa..2f6ffa3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -70,6 +70,11 @@ struct intel_uncore_funcs {
 	void (*force_wake_put)(struct intel_uncore *uncore,
 			       enum forcewake_domains domains);
 
+	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
+						  i915_reg_t r);
+	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
+						   i915_reg_t r);
+
 	u8 (*mmio_readb)(struct intel_uncore *uncore,
 			 i915_reg_t r, bool trace);
 	u16 (*mmio_readw)(struct intel_uncore *uncore,
@@ -97,6 +102,7 @@ struct intel_forcewake_range {
 struct intel_uncore {
 	void __iomem *regs;
 
+	struct drm_i915_private *i915;
 	struct intel_runtime_pm *rpm;
 
 	spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -117,9 +123,11 @@ struct intel_uncore {
 
 	enum forcewake_domains fw_domains;
 	enum forcewake_domains fw_domains_active;
+	enum forcewake_domains fw_domains_timer;
 	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
 
 	struct intel_uncore_forcewake_domain {
+		struct intel_uncore *uncore;
 		enum forcewake_domain_id id;
 		enum forcewake_domains mask;
 		unsigned int wake_count;
@@ -127,7 +135,7 @@ struct intel_uncore {
 		struct hrtimer timer;
 		u32 __iomem *reg_set;
 		u32 __iomem *reg_ack;
-	} fw_domain[FW_DOMAIN_ID_COUNT];
+	} *fw_domain[FW_DOMAIN_ID_COUNT];
 
 	struct {
 		unsigned int count;
@@ -141,18 +149,12 @@ struct intel_uncore {
 
 /* Iterate over initialised fw domains */
 #define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
-	for (tmp__ = (mask__); \
-	     tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+	for (tmp__ = (mask__); tmp__ ;) \
+		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])
 
 #define for_each_fw_domain(domain__, uncore__, tmp__) \
 	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
 
-static inline struct intel_uncore *
-forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
-{
-	return container_of(d, struct intel_uncore, fw_domain[d->id]);
-}
-
 static inline bool
 intel_uncore_has_forcewake(const struct intel_uncore *uncore)
 {
@@ -177,8 +179,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
 	return uncore->flags & UNCORE_HAS_FIFO;
 }
 
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init_early(struct intel_uncore *uncore);
+void intel_uncore_init_early(struct intel_uncore *uncore,
+			     struct drm_i915_private *i915);
 int intel_uncore_init_mmio(struct intel_uncore *uncore);
 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
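With fw_domain[] now an array of pointers, the iterators above only visit slots that were actually allocated by __fw_domain_init(). A small sketch of walking the initialised domains (illustrative only; dump_fw_domains() is hypothetical, the fields come from the struct above):

	static void dump_fw_domains(struct intel_uncore *uncore)
	{
		struct intel_uncore_forcewake_domain *domain;
		unsigned int tmp;

		/* for_each_fw_domain() skips NULL slots via for_each_if(). */
		for_each_fw_domain(domain, uncore, tmp)
			DRM_DEBUG("fw domain %d (mask 0x%x), wake_count %u\n",
				  domain->id, domain->mask, domain->wake_count);
	}
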
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 3db6fa6..06bd8b2 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -5,7 +5,7 @@
  */
 
 #include "intel_runtime_pm.h"
-#include "i915_gem.h"
+#include "intel_wakeref.h"
 
 static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
 {
@@ -17,7 +17,7 @@ static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
 	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
 
 	intel_runtime_pm_put(rpm, wakeref);
-	GEM_BUG_ON(!wakeref);
+	INTEL_WAKEREF_BUG_ON(!wakeref);
 }
 
 int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
@@ -48,6 +48,7 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
 	atomic_inc(&wf->count);
 	mutex_unlock(&wf->mutex);
 
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 	return 0;
 }
 
@@ -115,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
 	if (!refcount_inc_not_zero(&wf->count)) {
 		spin_lock_irqsave(&wf->lock, flags);
 		if (!refcount_inc_not_zero(&wf->count)) {
-			GEM_BUG_ON(wf->wakeref);
+			INTEL_WAKEREF_BUG_ON(wf->wakeref);
 			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
 			refcount_set(&wf->count, 1);
 		}
@@ -134,5 +135,5 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
 void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
 {
 	intel_wakeref_auto(wf, 0);
-	GEM_BUG_ON(wf->wakeref);
+	INTEL_WAKEREF_BUG_ON(wf->wakeref);
 }
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 9cbb2eb..1d6f598 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -13,6 +13,12 @@
 #include <linux/stackdepot.h>
 #include <linux/timer.h>
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
+#else
+#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
 struct intel_runtime_pm;
 
 typedef depot_stack_handle_t intel_wakeref_t;
@@ -66,6 +72,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
 }
 
 /**
+ * intel_wakeref_get_if_active: Acquire the wakeref
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+	return atomic_inc_not_zero(&wf->count);
+}
+
+/**
  * intel_wakeref_put: Release the wakeref
  * @i915: the drm_i915_private device
  * @wf: the wakeref
@@ -86,6 +107,7 @@ intel_wakeref_put(struct intel_runtime_pm *rpm,
 		  struct intel_wakeref *wf,
 		  int (*fn)(struct intel_wakeref *wf))
 {
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
 		return __intel_wakeref_put_last(rpm, wf, fn);
 
@@ -121,13 +143,13 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
  * @wf: the wakeref
  *
  * Returns: true if the wakeref is currently held.
  */
 static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
 {
 	return READ_ONCE(wf->wakeref);
 }
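The new intel_wakeref_get_if_active() only takes a reference when the wakeref is already held, so callers can piggy-back on existing activity without waking the device. A minimal usage sketch (illustrative only; my_release() and poke_if_already_awake() are hypothetical):

	static int my_release(struct intel_wakeref *wf)
	{
		/* Hypothetical teardown, run when the last reference is put. */
		return 0;
	}

	static bool poke_if_already_awake(struct intel_runtime_pm *rpm,
					  struct intel_wakeref *wf)
	{
		if (!intel_wakeref_get_if_active(wf))
			return false; /* idle: use intel_wakeref_get() to wake up */

		/* ... work that is only worth doing while already awake ... */

		intel_wakeref_put(rpm, wf, my_release);
		return true;
	}
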
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b4ba84..8c85078 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -225,17 +225,18 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
 	return 0;
 }
 
-static inline int write_and_verify(struct drm_i915_private *dev_priv,
-				   i915_reg_t reg, u32 val, u32 mask,
-				   u32 locked_bit)
+static int
+write_and_verify(struct intel_gt *gt,
+		 i915_reg_t reg, u32 val, u32 mask, u32 locked_bit)
 {
+	struct intel_uncore *uncore = gt->uncore;
 	u32 reg_val;
 
 	GEM_BUG_ON(val & ~mask);
 
-	I915_WRITE(reg, val);
+	intel_uncore_write(uncore, reg, val);
 
-	reg_val = I915_READ(reg);
+	reg_val = intel_uncore_read(uncore, reg);
 
 	return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
 }
@@ -243,6 +244,7 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv,
 /**
  * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
  * @wopcm: pointer to intel_wopcm.
+ * @gt: pointer to the containing GT
  *
  * Setup the GuC WOPCM size and offset registers with the calculated values. It
  * will verify the register values to make sure the registers are locked with
@@ -250,29 +252,30 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv,
  *
  * Return: 0 on success. -EIO if registers were locked with incorrect values.
  */
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt)
 {
-	struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+	struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+	struct intel_uncore *uncore = gt->uncore;
 	u32 huc_agent;
 	u32 mask;
 	int err;
 
-	if (!USES_GUC(dev_priv))
+	if (!USES_GUC(i915))
 		return 0;
 
-	GEM_BUG_ON(!HAS_GUC(dev_priv));
+	GEM_BUG_ON(!HAS_GUC(i915));
 	GEM_BUG_ON(!wopcm->guc.size);
 	GEM_BUG_ON(!wopcm->guc.base);
 
-	err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+	err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size,
 			       GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
 			       GUC_WOPCM_SIZE_LOCKED);
 	if (err)
 		goto err_out;
 
-	huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
+	huc_agent = USES_HUC(i915) ? HUC_LOADING_AGENT_GUC : 0;
 	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
-	err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
+	err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
 			       wopcm->guc.base | huc_agent, mask,
 			       GUC_WOPCM_OFFSET_VALID);
 	if (err)
@@ -283,8 +286,9 @@ int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
 err_out:
 	DRM_ERROR("Failed to init WOPCM registers:\n");
 	DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
-		  I915_READ(DMA_GUC_WOPCM_OFFSET));
-	DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
+		  intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+	DRM_ERROR("GUC_WOPCM_SIZE=%#x\n",
+		  intel_uncore_read(uncore, GUC_WOPCM_SIZE));
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 1144019..56aaed4 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -9,6 +9,8 @@
 
 #include <linux/types.h>
 
+struct intel_gt;
+
 /**
  * struct intel_wopcm - Overall WOPCM info and WOPCM regions.
  * @size: Size of overall WOPCM.
@@ -41,6 +43,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
 
 void intel_wopcm_init_early(struct intel_wopcm *wopcm);
 int intel_wopcm_init(struct intel_wopcm *wopcm);
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt);
 
 #endif
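With the @gt parameter added, the WOPCM registers are programmed through the GT's uncore rather than through implicit dev_priv accessors. A sketch of the updated call site (illustrative only; example_wopcm_init_hw() is hypothetical, and i915->wopcm / i915->gt are assumed to be the existing driver members):

	static int example_wopcm_init_hw(struct drm_i915_private *i915)
	{
		int err;

		/* Program GUC_WOPCM_SIZE / DMA_GUC_WOPCM_OFFSET via gt->uncore. */
		err = intel_wopcm_init_hw(&i915->wopcm, &i915->gt);
		if (err)
			DRM_ERROR("Failed to lock WOPCM registers (%d)\n", err);

		return err;
	}
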
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/Makefile
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_bdw.c
rename to drivers/gpu/drm/i915/oa/i915_oa_bdw.c
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_bdw.h
rename to drivers/gpu/drm/i915/oa/i915_oa_bdw.h
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_bxt.c
rename to drivers/gpu/drm/i915/oa/i915_oa_bxt.c
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_bxt.h
rename to drivers/gpu/drm/i915/oa/i915_oa_bxt.h
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cflgt2.c
rename to drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cflgt2.h
rename to drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cflgt3.c
rename to drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cflgt3.h
rename to drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_chv.c
rename to drivers/gpu/drm/i915/oa/i915_oa_chv.c
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_chv.h
rename to drivers/gpu/drm/i915/oa/i915_oa_chv.h
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cnl.c
rename to drivers/gpu/drm/i915/oa/i915_oa_cnl.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_cnl.h
rename to drivers/gpu/drm/i915/oa/i915_oa_cnl.h
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_glk.c
rename to drivers/gpu/drm/i915/oa/i915_oa_glk.c
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_glk.h
rename to drivers/gpu/drm/i915/oa/i915_oa_glk.h
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_hsw.c
rename to drivers/gpu/drm/i915/oa/i915_oa_hsw.c
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_hsw.h
rename to drivers/gpu/drm/i915/oa/i915_oa_hsw.h
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_icl.c
rename to drivers/gpu/drm/i915/oa/i915_oa_icl.c
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_icl.h
rename to drivers/gpu/drm/i915/oa/i915_oa_icl.h
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_kblgt2.c
rename to drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_kblgt2.h
rename to drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_kblgt3.c
rename to drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_kblgt3.h
rename to drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt2.c
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt2.h
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt3.c
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt3.h
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt4.c
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
similarity index 100%
rename from drivers/gpu/drm/i915/i915_oa_sklgt4.h
rename to drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537..84fce37 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,6 +4,8 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/kref.h>
+
 #include "gem/i915_gem_pm.h"
 
 #include "i915_selftest.h"
@@ -13,38 +15,87 @@
 
 struct live_active {
 	struct i915_active base;
+	struct kref ref;
 	bool retired;
 };
 
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+	kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+	i915_active_fini(&active->base);
+	kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+	struct live_active *active = container_of(ref, typeof(*active), ref);
+
+	__live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+	kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+	struct live_active *active = container_of(base, typeof(*active), base);
+
+	__live_get(active);
+	return 0;
+}
+
+static void __live_retire(struct i915_active *base)
 {
 	struct live_active *active = container_of(base, typeof(*active), base);
 
 	active->retired = true;
+	__live_put(active);
 }
 
-static int __live_active_setup(struct drm_i915_private *i915,
-			       struct live_active *active)
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+	struct live_active *active;
+
+	active = kzalloc(sizeof(*active), GFP_KERNEL);
+	if (!active)
+		return NULL;
+
+	kref_init(&active->ref);
+	i915_active_init(i915, &active->base, __live_active, __live_retire);
+
+	return active;
+}
+
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	struct i915_sw_fence *submit;
+	struct live_active *active;
 	enum intel_engine_id id;
 	unsigned int count = 0;
 	int err = 0;
 
+	active = __live_alloc(i915);
+	if (!active)
+		return ERR_PTR(-ENOMEM);
+
 	submit = heap_fence_create(GFP_KERNEL);
-	if (!submit)
-		return -ENOMEM;
-
-	i915_active_init(i915, &active->base, __live_active_retire);
-	active->retired = false;
-
-	if (!i915_active_acquire(&active->base)) {
-		pr_err("First i915_active_acquire should report being idle\n");
-		err = -EINVAL;
-		goto out;
+	if (!submit) {
+		kfree(active);
+		return ERR_PTR(-ENOMEM);
 	}
 
+	err = i915_active_acquire(&active->base);
+	if (err)
+		goto out;
+
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
@@ -74,74 +125,92 @@ static int __live_active_setup(struct drm_i915_private *i915,
 		pr_err("i915_active retired before submission!\n");
 		err = -EINVAL;
 	}
-	if (active->base.count != count) {
+	if (atomic_read(&active->base.count) != count) {
 		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
-		       active->base.count, count);
+		       atomic_read(&active->base.count), count);
 		err = -EINVAL;
 	}
 
 out:
 	i915_sw_fence_commit(submit);
 	heap_fence_put(submit);
+	if (err) {
+		__live_put(active);
+		active = ERR_PTR(err);
+	}
 
-	return err;
+	return active;
 }
 
 static int live_active_wait(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
+	struct live_active *active;
 	intel_wakeref_t wakeref;
-	int err;
+	int err = 0;
 
 	/* Check that we get a callback when requests retire upon waiting */
 
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	err = __live_active_setup(i915, &active);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto err;
+	}
 
-	i915_active_wait(&active.base);
-	if (!active.retired) {
+	i915_active_wait(&active->base);
+	if (!active->retired) {
 		pr_err("i915_active not retired after waiting!\n");
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
+	__live_put(active);
+
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
+err:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
+
 	return err;
 }
 
 static int live_active_retire(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct live_active active;
+	struct live_active *active;
 	intel_wakeref_t wakeref;
-	int err;
+	int err = 0;
 
 	/* Check that we get a callback when requests are indirectly retired */
 
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	err = __live_active_setup(i915, &active);
+	active = __live_active_setup(i915);
+	if (IS_ERR(active)) {
+		err = PTR_ERR(active);
+		goto err;
+	}
 
 	/* waits for & retires all requests */
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
-	if (!active.retired) {
+	if (!active->retired) {
 		pr_err("i915_active not retired after flushing!\n");
 		err = -EINVAL;
 	}
 
-	i915_active_fini(&active.base);
+	__live_put(active);
+
+err:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
+
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6..b8ffae4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -115,7 +115,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	 * that runtime-pm just works.
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		intel_gt_sanitize(i915, false);
+		intel_gt_sanitize(&i915->gt, false);
 		i915_gem_sanitize(i915);
 		i915_gem_resume(i915);
 	}
@@ -154,8 +154,6 @@ static int igt_gem_suspend(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = switch_to_context(i915, ctx);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 out:
 	mock_file_free(i915, file);
@@ -195,8 +193,6 @@ static int igt_gem_hibernate(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = switch_to_context(i915, ctx);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
-		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 out:
 	mock_file_free(i915, file);
@@ -213,5 +209,5 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
 	if (i915_terminally_wedged(i915))
 		return 0;
 
-	return i915_subtests(tests, i915);
+	return i915_live_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1a60b9f..31a51ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg)
 	}
 
 err_ppgtt_cleanup:
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	i915_vm_put(&ppgtt->vm);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
 
@@ -1195,7 +1193,7 @@ static int igt_ggtt_page(void *arg)
 		iowrite32(n, vaddr + n);
 		io_mapping_unmap_atomic(vaddr);
 	}
-	i915_gem_flush_ggtt_writes(i915);
+	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 
 	i915_random_reorder(order, count, &prng);
 	for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc442..2b31a4e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,7 @@
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
 selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(timelines, intel_timeline_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(active, i915_active_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb17..b55da4d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
 selftest(phys, i915_gem_phys_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 298bb71..1bbfc43 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,6 +27,8 @@
 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"
 
+#include "gt/intel_gt.h"
+
 #include "i915_random.h"
 #include "i915_selftest.h"
 #include "igt_live_test.h"
@@ -73,55 +75,58 @@ static int igt_wait_request(void *arg)
 		err = -ENOMEM;
 		goto out_unlock;
 	}
+	i915_request_get(request);
 
 	if (i915_request_wait(request, 0, 0) != -ETIME) {
 		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) != -ETIME) {
 		pr_err("request wait succeeded (expected timeout before submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_completed(request)) {
 		pr_err("request completed before submit!!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	i915_request_add(request);
 
 	if (i915_request_wait(request, 0, 0) != -ETIME) {
 		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_completed(request)) {
 		pr_err("request completed immediately!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T / 2) != -ETIME) {
 		pr_err("request wait succeeded (expected timeout!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) == -ETIME) {
 		pr_err("request wait timed out!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (!i915_request_completed(request)) {
 		pr_err("request not complete after waiting!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) == -ETIME) {
 		pr_err("request wait timed out when already complete!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	err = 0;
+out_request:
+	i915_request_put(request);
 out_unlock:
 	mock_device_flush(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -366,13 +371,15 @@ static int __igt_breadcrumbs_smoketest(void *arg)
 
 		if (!wait_event_timeout(wait->wait,
 					i915_sw_fence_done(wait),
-					HZ / 2)) {
+					5 * HZ)) {
 			struct i915_request *rq = requests[count - 1];
 
-			pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
-			       count,
+			pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+			       atomic_read(&wait->pending), count,
 			       rq->fence.context, rq->fence.seqno,
 			       t->engine->name);
+			GEM_TRACE_DUMP();
+
 			i915_gem_set_wedged(t->engine->i915);
 			GEM_BUG_ON(!i915_request_completed(rq));
 			i915_sw_fence_wait(wait);
@@ -622,7 +629,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	i915_gem_chipset_flush(i915);
+	intel_gt_chipset_flush(&i915->gt);
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
@@ -791,7 +798,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	i915_gem_chipset_flush(i915);
+	intel_gt_chipset_flush(&i915->gt);
 
 	return vma;
 
@@ -809,7 +816,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
 		return PTR_ERR(cmd);
 
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(batch->vm->i915);
+	intel_gt_chipset_flush(batch->vm->gt);
 
 	i915_gem_object_unpin_map(batch->obj);
 
@@ -1031,7 +1038,7 @@ static int live_sequential_engines(void *arg)
 					      I915_MAP_WC);
 		if (!IS_ERR(cmd)) {
 			*cmd = MI_BATCH_BUFFER_END;
-			i915_gem_chipset_flush(i915);
+			intel_gt_chipset_flush(engine->gt);
 
 			i915_gem_object_unpin_map(request[id]->batch->obj);
 		}
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index b18eaef..f46ccf8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -26,6 +26,8 @@
 #include "../i915_drv.h"
 #include "../i915_selftest.h"
 
+#include "igt_flush_test.h"
+
 struct i915_selftest i915_selftest __read_mostly = {
 	.timeout_ms = 1000,
 };
@@ -240,7 +242,40 @@ static bool apply_subtest_filter(const char *caller, const char *name)
 	return result;
 }
 
+int __i915_nop_setup(void *data)
+{
+	return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+	return err;
+}
+
+int __i915_live_setup(void *data)
+{
+	struct drm_i915_private *i915 = data;
+
+	return i915_terminally_wedged(i915);
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+	struct drm_i915_private *i915 = data;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	i915_gem_drain_freed_objects(i915);
+
+	return err;
+}
+
 int __i915_subtests(const char *caller,
+		    int (*setup)(void *data),
+		    int (*teardown)(int err, void *data),
 		    const struct i915_subtest *st,
 		    unsigned int count,
 		    void *data)
@@ -255,10 +290,17 @@ int __i915_subtests(const char *caller,
 		if (!apply_subtest_filter(caller, st->name))
 			continue;
 
+		err = setup(data);
+		if (err) {
+			pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+			       caller, st->name);
+			return err;
+		}
+
 		pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
 		GEM_TRACE("Running %s/%s\n", caller, st->name);
 
-		err = st->func(data);
+		err = teardown(st->func(data), data);
 		if (err && err != -EINTR) {
 			pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
 			       caller, st->name, err);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1e59b54..89b6552 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
  *
  * Copyright © 2018 Intel Corporation
  */
+#include "gt/intel_gt.h"
 
 #include "gem/selftests/igt_gem_utils.h"
 
@@ -18,6 +19,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
 
 	memset(spin, 0, sizeof(*spin));
 	spin->i915 = i915;
+	spin->gt = &i915->gt;
 
 	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(spin->hws)) {
@@ -94,6 +96,8 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	u32 *batch;
 	int err;
 
+	spin->gt = engine->gt;
+
 	vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
@@ -138,7 +142,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	*batch++ = upper_32_bits(vma->node.start);
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
-	i915_gem_chipset_flush(spin->i915);
+	intel_gt_chipset_flush(engine->gt);
 
 	if (engine->emit_init_breadcrumb &&
 	    rq->timeline->has_initial_breadcrumb) {
@@ -172,7 +176,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
 void igt_spinner_end(struct igt_spinner *spin)
 {
 	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
+	intel_gt_chipset_flush(spin->gt);
 }
 
 void igt_spinner_fini(struct igt_spinner *spin)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 34a88ac..1bfc39e 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -14,8 +14,11 @@
 #include "i915_request.h"
 #include "i915_selftest.h"
 
+struct intel_gt;
+
 struct igt_spinner {
 	struct drm_i915_private *i915;
+	struct intel_gt *gt;
 	struct drm_i915_gem_object *hws;
 	struct drm_i915_gem_object *obj;
 	u32 *batch;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 64bc514..2741805 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 
+#include "gt/intel_gt.h"
 #include "gt/mock_engine.h"
 
 #include "mock_request.h"
@@ -67,7 +68,7 @@ static void mock_device_release(struct drm_device *dev)
 	i915_gem_contexts_fini(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	i915_timelines_fini(i915);
+	intel_timelines_fini(i915);
 
 	drain_workqueue(i915->wq);
 	i915_gem_drain_freed_objects(i915);
@@ -179,7 +180,7 @@ struct drm_i915_private *mock_gem_device(void)
 
 	mock_uncore_init(&i915->uncore);
 	i915_gem_init__mm(i915);
-	intel_gt_pm_init(i915);
+	intel_gt_init_early(&i915->gt, i915);
 	atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
 
 	init_waitqueue_head(&i915->gpu_error.wait_queue);
@@ -198,11 +199,7 @@ struct drm_i915_private *mock_gem_device(void)
 
 	i915->gt.awake = true;
 
-	i915_timelines_init(i915);
-
-	INIT_LIST_HEAD(&i915->gt.active_rings);
-	INIT_LIST_HEAD(&i915->gt.closed_vma);
-	spin_lock_init(&i915->gt.closed_lock);
+	intel_timelines_init(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 
@@ -233,7 +230,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mock_engine_free(i915->engine[RCS0]);
 err_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	i915_timelines_fini(i915);
+	intel_timelines_fini(i915);
 	destroy_workqueue(i915->wq);
 err_drv:
 	drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f625c30..e62a67e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
 {
 	memset(ggtt, 0, sizeof(*ggtt));
 
+	ggtt->vm.gt = &i915->gt;
 	ggtt->vm.i915 = i915;
 	ggtt->vm.is_ggtt = true;
 
@@ -116,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+	intel_gt_init_hw(i915);
 }
 
 void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
deleted file mode 100644
index b6deaa6..0000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#ifndef __MOCK_TIMELINE__
-#define __MOCK_TIMELINE__
-
-struct i915_timeline;
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
-
-#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ff8999c..49585f1 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -41,6 +41,6 @@ __nop_read(64)
 
 void mock_uncore_init(struct intel_uncore *uncore)
 {
-	ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
-	ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
+	ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
 }
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 6d60ea6..6c342ac 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -568,7 +568,8 @@
 	INTEL_VGA_DEVICE(0x8A56, info), \
 	INTEL_VGA_DEVICE(0x8A71, info), \
 	INTEL_VGA_DEVICE(0x8A70, info), \
-	INTEL_VGA_DEVICE(0x8A53, info)
+	INTEL_VGA_DEVICE(0x8A53, info), \
+	INTEL_VGA_DEVICE(0x8A54, info)
 
 #define INTEL_ICL_11_IDS(info) \
 	INTEL_ICL_PORT_F_IDS(info), \
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 328d05e..469dc51 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
+#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
 
 #define I915_PARAM_HUC_STATUS		 42